]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.2.4-201202032052.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.4-201202032052.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index c8e187e..c445af7 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +ifeq ($(KBUILD_EXTMOD),)
243 +gcc-plugins:
244 + $(Q)$(MAKE) $(build)=tools/gcc
245 +else
246 +gcc-plugins: ;
247 +endif
248 +else
249 +gcc-plugins:
250 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252 +else
253 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254 +endif
255 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256 +endif
257 +endif
258 +
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262 @@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271 @@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279 @@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283 -$(vmlinux-dirs): prepare scripts
284 +$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288 @@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296 @@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304 @@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308 -modules_prepare: prepare scripts
309 +modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313 @@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317 + -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321 @@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329 @@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333 -%.s: %.c prepare scripts FORCE
334 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335 +%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339 -%.o: %.c prepare scripts FORCE
340 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341 +%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.s: %.S prepare scripts FORCE
346 +%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 -%.o: %.S prepare scripts FORCE
349 +%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353 @@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357 -%/: prepare scripts FORCE
358 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359 +%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363 -%.ko: prepare scripts FORCE
364 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365 +%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370 index da5449e..7418343 100644
371 --- a/arch/alpha/include/asm/elf.h
372 +++ b/arch/alpha/include/asm/elf.h
373 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377 +#ifdef CONFIG_PAX_ASLR
378 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379 +
380 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382 +#endif
383 +
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388 index de98a73..bd4f1f8 100644
389 --- a/arch/alpha/include/asm/pgtable.h
390 +++ b/arch/alpha/include/asm/pgtable.h
391 @@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395 +
396 +#ifdef CONFIG_PAX_PAGEEXEC
397 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400 +#else
401 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
402 +# define PAGE_COPY_NOEXEC PAGE_COPY
403 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
404 +#endif
405 +
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410 index 2fd00b7..cfd5069 100644
411 --- a/arch/alpha/kernel/module.c
412 +++ b/arch/alpha/kernel/module.c
413 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417 - gp = (u64)me->module_core + me->core_size - 0x8000;
418 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423 index 01e8715..be0e80f 100644
424 --- a/arch/alpha/kernel/osf_sys.c
425 +++ b/arch/alpha/kernel/osf_sys.c
426 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430 - if (!vma || addr + len <= vma->vm_start)
431 + if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439 +#ifdef CONFIG_PAX_RANDMMAP
440 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441 +#endif
442 +
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451 - len, limit);
452 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453 +
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458 index fadd5f8..904e73a 100644
459 --- a/arch/alpha/mm/fault.c
460 +++ b/arch/alpha/mm/fault.c
461 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465 +#ifdef CONFIG_PAX_PAGEEXEC
466 +/*
467 + * PaX: decide what to do with offenders (regs->pc = fault address)
468 + *
469 + * returns 1 when task should be killed
470 + * 2 when patched PLT trampoline was detected
471 + * 3 when unpatched PLT trampoline was detected
472 + */
473 +static int pax_handle_fetch_fault(struct pt_regs *regs)
474 +{
475 +
476 +#ifdef CONFIG_PAX_EMUPLT
477 + int err;
478 +
479 + do { /* PaX: patched PLT emulation #1 */
480 + unsigned int ldah, ldq, jmp;
481 +
482 + err = get_user(ldah, (unsigned int *)regs->pc);
483 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485 +
486 + if (err)
487 + break;
488 +
489 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491 + jmp == 0x6BFB0000U)
492 + {
493 + unsigned long r27, addr;
494 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496 +
497 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498 + err = get_user(r27, (unsigned long *)addr);
499 + if (err)
500 + break;
501 +
502 + regs->r27 = r27;
503 + regs->pc = r27;
504 + return 2;
505 + }
506 + } while (0);
507 +
508 + do { /* PaX: patched PLT emulation #2 */
509 + unsigned int ldah, lda, br;
510 +
511 + err = get_user(ldah, (unsigned int *)regs->pc);
512 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
513 + err |= get_user(br, (unsigned int *)(regs->pc+8));
514 +
515 + if (err)
516 + break;
517 +
518 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
520 + (br & 0xFFE00000U) == 0xC3E00000U)
521 + {
522 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525 +
526 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528 + return 2;
529 + }
530 + } while (0);
531 +
532 + do { /* PaX: unpatched PLT emulation */
533 + unsigned int br;
534 +
535 + err = get_user(br, (unsigned int *)regs->pc);
536 +
537 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538 + unsigned int br2, ldq, nop, jmp;
539 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540 +
541 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542 + err = get_user(br2, (unsigned int *)addr);
543 + err |= get_user(ldq, (unsigned int *)(addr+4));
544 + err |= get_user(nop, (unsigned int *)(addr+8));
545 + err |= get_user(jmp, (unsigned int *)(addr+12));
546 + err |= get_user(resolver, (unsigned long *)(addr+16));
547 +
548 + if (err)
549 + break;
550 +
551 + if (br2 == 0xC3600000U &&
552 + ldq == 0xA77B000CU &&
553 + nop == 0x47FF041FU &&
554 + jmp == 0x6B7B0000U)
555 + {
556 + regs->r28 = regs->pc+4;
557 + regs->r27 = addr+16;
558 + regs->pc = resolver;
559 + return 3;
560 + }
561 + }
562 + } while (0);
563 +#endif
564 +
565 + return 1;
566 +}
567 +
568 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569 +{
570 + unsigned long i;
571 +
572 + printk(KERN_ERR "PAX: bytes at PC: ");
573 + for (i = 0; i < 5; i++) {
574 + unsigned int c;
575 + if (get_user(c, (unsigned int *)pc+i))
576 + printk(KERN_CONT "???????? ");
577 + else
578 + printk(KERN_CONT "%08x ", c);
579 + }
580 + printk("\n");
581 +}
582 +#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590 - if (!(vma->vm_flags & VM_EXEC))
591 + if (!(vma->vm_flags & VM_EXEC)) {
592 +
593 +#ifdef CONFIG_PAX_PAGEEXEC
594 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595 + goto bad_area;
596 +
597 + up_read(&mm->mmap_sem);
598 + switch (pax_handle_fetch_fault(regs)) {
599 +
600 +#ifdef CONFIG_PAX_EMUPLT
601 + case 2:
602 + case 3:
603 + return;
604 +#endif
605 +
606 + }
607 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608 + do_group_exit(SIGKILL);
609 +#else
610 goto bad_area;
611 +#endif
612 +
613 + }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618 index 86976d0..8a57797 100644
619 --- a/arch/arm/include/asm/atomic.h
620 +++ b/arch/arm/include/asm/atomic.h
621 @@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625 +#ifdef CONFIG_PAX_REFCOUNT
626 +typedef struct {
627 + u64 __aligned(8) counter;
628 +} atomic64_unchecked_t;
629 +#else
630 +typedef atomic64_t atomic64_unchecked_t;
631 +#endif
632 +
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637 index 0e9ce8d..6ef1e03 100644
638 --- a/arch/arm/include/asm/elf.h
639 +++ b/arch/arm/include/asm/elf.h
640 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646 +
647 +#ifdef CONFIG_PAX_ASLR
648 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649 +
650 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652 +#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660 -struct mm_struct;
661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662 -#define arch_randomize_brk arch_randomize_brk
663 -
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668 index e51b1e8..32a3113 100644
669 --- a/arch/arm/include/asm/kmap_types.h
670 +++ b/arch/arm/include/asm/kmap_types.h
671 @@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675 + KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680 index b293616..96310e5 100644
681 --- a/arch/arm/include/asm/uaccess.h
682 +++ b/arch/arm/include/asm/uaccess.h
683 @@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
688 +
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692 @@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700 +
701 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702 +{
703 + if (!__builtin_constant_p(n))
704 + check_object_size(to, n, false);
705 + return ___copy_from_user(to, from, n);
706 +}
707 +
708 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709 +{
710 + if (!__builtin_constant_p(n))
711 + check_object_size(from, n, true);
712 + return ___copy_to_user(to, from, n);
713 +}
714 +
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722 + if ((long)n < 0)
723 + return n;
724 +
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732 + if ((long)n < 0)
733 + return n;
734 +
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739 index 5b0bce6..becd81c 100644
740 --- a/arch/arm/kernel/armksyms.c
741 +++ b/arch/arm/kernel/armksyms.c
742 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746 -EXPORT_SYMBOL(__copy_from_user);
747 -EXPORT_SYMBOL(__copy_to_user);
748 +EXPORT_SYMBOL(___copy_from_user);
749 +EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754 index 3d0c6fb..3dcae52 100644
755 --- a/arch/arm/kernel/process.c
756 +++ b/arch/arm/kernel/process.c
757 @@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761 -#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769 -unsigned long arch_randomize_brk(struct mm_struct *mm)
770 -{
771 - unsigned long range_end = mm->brk + 0x02000000;
772 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773 -}
774 -
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779 index 99a5727..a3d5bb1 100644
780 --- a/arch/arm/kernel/traps.c
781 +++ b/arch/arm/kernel/traps.c
782 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786 +extern void gr_handle_kernel_exploit(void);
787 +
788 /*
789 * This function is protected against re-entrancy.
790 */
791 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795 +
796 + gr_handle_kernel_exploit();
797 +
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802 index 66a477a..bee61d3 100644
803 --- a/arch/arm/lib/copy_from_user.S
804 +++ b/arch/arm/lib/copy_from_user.S
805 @@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809 - * size_t __copy_from_user(void *to, const void *from, size_t n)
810 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814 @@ -84,11 +84,11 @@
815
816 .text
817
818 -ENTRY(__copy_from_user)
819 +ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823 -ENDPROC(__copy_from_user)
824 +ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829 index d066df6..df28194 100644
830 --- a/arch/arm/lib/copy_to_user.S
831 +++ b/arch/arm/lib/copy_to_user.S
832 @@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836 - * size_t __copy_to_user(void *to, const void *from, size_t n)
837 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841 @@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845 -WEAK(__copy_to_user)
846 +WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850 -ENDPROC(__copy_to_user)
851 +ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856 index d0ece2a..5ae2f39 100644
857 --- a/arch/arm/lib/uaccess.S
858 +++ b/arch/arm/lib/uaccess.S
859 @@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872 -ENTRY(__copy_to_user)
873 +ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881 -ENDPROC(__copy_to_user)
882 +ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898 -ENTRY(__copy_from_user)
899 +ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907 -ENDPROC(__copy_from_user)
908 +ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913 index 025f742..8432b08 100644
914 --- a/arch/arm/lib/uaccess_with_memcpy.c
915 +++ b/arch/arm/lib/uaccess_with_memcpy.c
916 @@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920 -__copy_to_user(void __user *to, const void *from, unsigned long n)
921 +___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926 index 2b2d51c..0127490 100644
927 --- a/arch/arm/mach-ux500/mbox-db5500.c
928 +++ b/arch/arm/mach-ux500/mbox-db5500.c
929 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939 index aa33949..b242a2f 100644
940 --- a/arch/arm/mm/fault.c
941 +++ b/arch/arm/mm/fault.c
942 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946 +#ifdef CONFIG_PAX_PAGEEXEC
947 + if (fsr & FSR_LNX_PF) {
948 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949 + do_group_exit(SIGKILL);
950 + }
951 +#endif
952 +
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960 +#ifdef CONFIG_PAX_PAGEEXEC
961 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962 +{
963 + long i;
964 +
965 + printk(KERN_ERR "PAX: bytes at PC: ");
966 + for (i = 0; i < 20; i++) {
967 + unsigned char c;
968 + if (get_user(c, (__force unsigned char __user *)pc+i))
969 + printk(KERN_CONT "?? ");
970 + else
971 + printk(KERN_CONT "%02x ", c);
972 + }
973 + printk("\n");
974 +
975 + printk(KERN_ERR "PAX: bytes at SP-4: ");
976 + for (i = -1; i < 20; i++) {
977 + unsigned long c;
978 + if (get_user(c, (__force unsigned long __user *)sp+i))
979 + printk(KERN_CONT "???????? ");
980 + else
981 + printk(KERN_CONT "%08lx ", c);
982 + }
983 + printk("\n");
984 +}
985 +#endif
986 +
987 /*
988 * First Level Translation Fault Handler
989 *
990 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991 index 44b628e..623ee2a 100644
992 --- a/arch/arm/mm/mmap.c
993 +++ b/arch/arm/mm/mmap.c
994 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998 +#ifdef CONFIG_PAX_RANDMMAP
999 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000 +#endif
1001 +
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009 - if (TASK_SIZE - len >= addr &&
1010 - (!vma || addr + len <= vma->vm_start))
1011 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015 - start_addr = addr = mm->free_area_cache;
1016 + start_addr = addr = mm->free_area_cache;
1017 } else {
1018 - start_addr = addr = TASK_UNMAPPED_BASE;
1019 - mm->cached_hole_size = 0;
1020 + start_addr = addr = mm->mmap_base;
1021 + mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025 @@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029 - if (start_addr != TASK_UNMAPPED_BASE) {
1030 - start_addr = addr = TASK_UNMAPPED_BASE;
1031 + if (start_addr != mm->mmap_base) {
1032 + start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038 - if (!vma || addr + len <= vma->vm_start) {
1039 + if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044 index 3b3159b..425ea94 100644
1045 --- a/arch/avr32/include/asm/elf.h
1046 +++ b/arch/avr32/include/asm/elf.h
1047 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054 +#ifdef CONFIG_PAX_ASLR
1055 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056 +
1057 +#define PAX_DELTA_MMAP_LEN 15
1058 +#define PAX_DELTA_STACK_LEN 15
1059 +#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064 index b7f5c68..556135c 100644
1065 --- a/arch/avr32/include/asm/kmap_types.h
1066 +++ b/arch/avr32/include/asm/kmap_types.h
1067 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071 -D(14) KM_TYPE_NR
1072 +D(14) KM_CLEARPAGE,
1073 +D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078 index f7040a1..db9f300 100644
1079 --- a/arch/avr32/mm/fault.c
1080 +++ b/arch/avr32/mm/fault.c
1081 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085 +#ifdef CONFIG_PAX_PAGEEXEC
1086 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087 +{
1088 + unsigned long i;
1089 +
1090 + printk(KERN_ERR "PAX: bytes at PC: ");
1091 + for (i = 0; i < 20; i++) {
1092 + unsigned char c;
1093 + if (get_user(c, (unsigned char *)pc+i))
1094 + printk(KERN_CONT "?? ");
1095 + else
1096 + printk(KERN_CONT "%02x ", c);
1097 + }
1098 + printk("\n");
1099 +}
1100 +#endif
1101 +
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105 @@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109 +
1110 +#ifdef CONFIG_PAX_PAGEEXEC
1111 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114 + do_group_exit(SIGKILL);
1115 + }
1116 + }
1117 +#endif
1118 +
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123 index f8e16b2..c73ff79 100644
1124 --- a/arch/frv/include/asm/kmap_types.h
1125 +++ b/arch/frv/include/asm/kmap_types.h
1126 @@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130 + KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135 index 385fd30..6c3d97e 100644
1136 --- a/arch/frv/mm/elf-fdpic.c
1137 +++ b/arch/frv/mm/elf-fdpic.c
1138 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142 - if (TASK_SIZE - len >= addr &&
1143 - (!vma || addr + len <= vma->vm_start))
1144 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152 - if (addr + len <= vma->vm_start)
1153 + if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161 - if (addr + len <= vma->vm_start)
1162 + if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167 index b5298eb..67c6e62 100644
1168 --- a/arch/ia64/include/asm/elf.h
1169 +++ b/arch/ia64/include/asm/elf.h
1170 @@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174 +#ifdef CONFIG_PAX_ASLR
1175 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176 +
1177 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179 +#endif
1180 +
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185 index 1a97af3..7529d31 100644
1186 --- a/arch/ia64/include/asm/pgtable.h
1187 +++ b/arch/ia64/include/asm/pgtable.h
1188 @@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192 -
1193 +#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197 @@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201 +
1202 +#ifdef CONFIG_PAX_PAGEEXEC
1203 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206 +#else
1207 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209 +# define PAGE_COPY_NOEXEC PAGE_COPY
1210 +#endif
1211 +
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216 index b77768d..e0795eb 100644
1217 --- a/arch/ia64/include/asm/spinlock.h
1218 +++ b/arch/ia64/include/asm/spinlock.h
1219 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229 index 449c8c0..432a3d2 100644
1230 --- a/arch/ia64/include/asm/uaccess.h
1231 +++ b/arch/ia64/include/asm/uaccess.h
1232 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251 index 24603be..948052d 100644
1252 --- a/arch/ia64/kernel/module.c
1253 +++ b/arch/ia64/kernel/module.c
1254 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258 - if (mod && mod->arch.init_unw_table &&
1259 - module_region == mod->module_init) {
1260 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268 +in_init_rx (const struct module *mod, uint64_t addr)
1269 +{
1270 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271 +}
1272 +
1273 +static inline int
1274 +in_init_rw (const struct module *mod, uint64_t addr)
1275 +{
1276 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277 +}
1278 +
1279 +static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282 - return addr - (uint64_t) mod->module_init < mod->init_size;
1283 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284 +}
1285 +
1286 +static inline int
1287 +in_core_rx (const struct module *mod, uint64_t addr)
1288 +{
1289 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290 +}
1291 +
1292 +static inline int
1293 +in_core_rw (const struct module *mod, uint64_t addr)
1294 +{
1295 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301 - return addr - (uint64_t) mod->module_core < mod->core_size;
1302 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311 + if (in_init_rx(mod, val))
1312 + val -= (uint64_t) mod->module_init_rx;
1313 + else if (in_init_rw(mod, val))
1314 + val -= (uint64_t) mod->module_init_rw;
1315 + else if (in_core_rx(mod, val))
1316 + val -= (uint64_t) mod->module_core_rx;
1317 + else if (in_core_rw(mod, val))
1318 + val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326 - if (mod->core_size > MAX_LTOFF)
1327 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332 - gp = mod->core_size - MAX_LTOFF / 2;
1333 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335 - gp = mod->core_size / 2;
1336 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343 index 609d500..7dde2a8 100644
1344 --- a/arch/ia64/kernel/sys_ia64.c
1345 +++ b/arch/ia64/kernel/sys_ia64.c
1346 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350 +
1351 +#ifdef CONFIG_PAX_RANDMMAP
1352 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1353 + addr = mm->free_area_cache;
1354 + else
1355 +#endif
1356 +
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364 - if (start_addr != TASK_UNMAPPED_BASE) {
1365 + if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367 - addr = TASK_UNMAPPED_BASE;
1368 + addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373 - if (!vma || addr + len <= vma->vm_start) {
1374 + if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379 index 53c0ba0..2accdde 100644
1380 --- a/arch/ia64/kernel/vmlinux.lds.S
1381 +++ b/arch/ia64/kernel/vmlinux.lds.S
1382 @@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386 - __phys_per_cpu_start = __per_cpu_load;
1387 + __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392 index 20b3593..1ce77f0 100644
1393 --- a/arch/ia64/mm/fault.c
1394 +++ b/arch/ia64/mm/fault.c
1395 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399 +#ifdef CONFIG_PAX_PAGEEXEC
1400 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401 +{
1402 + unsigned long i;
1403 +
1404 + printk(KERN_ERR "PAX: bytes at PC: ");
1405 + for (i = 0; i < 8; i++) {
1406 + unsigned int c;
1407 + if (get_user(c, (unsigned int *)pc+i))
1408 + printk(KERN_CONT "???????? ");
1409 + else
1410 + printk(KERN_CONT "%08x ", c);
1411 + }
1412 + printk("\n");
1413 +}
1414 +#endif
1415 +
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423 - if ((vma->vm_flags & mask) != mask)
1424 + if ((vma->vm_flags & mask) != mask) {
1425 +
1426 +#ifdef CONFIG_PAX_PAGEEXEC
1427 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429 + goto bad_area;
1430 +
1431 + up_read(&mm->mmap_sem);
1432 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433 + do_group_exit(SIGKILL);
1434 + }
1435 +#endif
1436 +
1437 goto bad_area;
1438
1439 + }
1440 +
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445 index 5ca674b..e0e1b70 100644
1446 --- a/arch/ia64/mm/hugetlbpage.c
1447 +++ b/arch/ia64/mm/hugetlbpage.c
1448 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452 - if (!vmm || (addr + len) <= vmm->vm_start)
1453 + if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458 index 00cb0e2..2ad8024 100644
1459 --- a/arch/ia64/mm/init.c
1460 +++ b/arch/ia64/mm/init.c
1461 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465 +
1466 +#ifdef CONFIG_PAX_PAGEEXEC
1467 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468 + vma->vm_flags &= ~VM_EXEC;
1469 +
1470 +#ifdef CONFIG_PAX_MPROTECT
1471 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472 + vma->vm_flags &= ~VM_MAYEXEC;
1473 +#endif
1474 +
1475 + }
1476 +#endif
1477 +
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482 index 82abd15..d95ae5d 100644
1483 --- a/arch/m32r/lib/usercopy.c
1484 +++ b/arch/m32r/lib/usercopy.c
1485 @@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489 + if ((long)n < 0)
1490 + return n;
1491 +
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499 + if ((long)n < 0)
1500 + return n;
1501 +
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506 index 455c0ac..ad65fbe 100644
1507 --- a/arch/mips/include/asm/elf.h
1508 +++ b/arch/mips/include/asm/elf.h
1509 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513 +#ifdef CONFIG_PAX_ASLR
1514 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515 +
1516 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518 +#endif
1519 +
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525 -struct mm_struct;
1526 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527 -#define arch_randomize_brk arch_randomize_brk
1528 -
1529 #endif /* _ASM_ELF_H */
1530 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531 index e59cd1a..8e329d6 100644
1532 --- a/arch/mips/include/asm/page.h
1533 +++ b/arch/mips/include/asm/page.h
1534 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544 index 6018c80..7c37203 100644
1545 --- a/arch/mips/include/asm/system.h
1546 +++ b/arch/mips/include/asm/system.h
1547 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551 -extern unsigned long arch_align_stack(unsigned long sp);
1552 +#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556 index 9fdd8bc..4bd7f1a 100644
1557 --- a/arch/mips/kernel/binfmt_elfn32.c
1558 +++ b/arch/mips/kernel/binfmt_elfn32.c
1559 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563 +#ifdef CONFIG_PAX_ASLR
1564 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565 +
1566 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568 +#endif
1569 +
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574 index ff44823..97f8906 100644
1575 --- a/arch/mips/kernel/binfmt_elfo32.c
1576 +++ b/arch/mips/kernel/binfmt_elfo32.c
1577 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581 +#ifdef CONFIG_PAX_ASLR
1582 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583 +
1584 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586 +#endif
1587 +
1588 #include <asm/processor.h>
1589
1590 /*
1591 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592 index c47f96e..661d418 100644
1593 --- a/arch/mips/kernel/process.c
1594 +++ b/arch/mips/kernel/process.c
1595 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599 -
1600 -/*
1601 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1602 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603 - */
1604 -unsigned long arch_align_stack(unsigned long sp)
1605 -{
1606 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607 - sp -= get_random_int() & ~PAGE_MASK;
1608 -
1609 - return sp & ALMASK;
1610 -}
1611 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612 index 937cf33..adb39bb 100644
1613 --- a/arch/mips/mm/fault.c
1614 +++ b/arch/mips/mm/fault.c
1615 @@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619 +#ifdef CONFIG_PAX_PAGEEXEC
1620 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621 +{
1622 + unsigned long i;
1623 +
1624 + printk(KERN_ERR "PAX: bytes at PC: ");
1625 + for (i = 0; i < 5; i++) {
1626 + unsigned int c;
1627 + if (get_user(c, (unsigned int *)pc+i))
1628 + printk(KERN_CONT "???????? ");
1629 + else
1630 + printk(KERN_CONT "%08x ", c);
1631 + }
1632 + printk("\n");
1633 +}
1634 +#endif
1635 +
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640 index 302d779..7d35bf8 100644
1641 --- a/arch/mips/mm/mmap.c
1642 +++ b/arch/mips/mm/mmap.c
1643 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647 +
1648 +#ifdef CONFIG_PAX_RANDMMAP
1649 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650 +#endif
1651 +
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659 - if (TASK_SIZE - len >= addr &&
1660 - (!vma || addr + len <= vma->vm_start))
1661 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1662 return addr;
1663 }
1664
1665 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669 - if (!vma || addr + len <= vma->vm_start)
1670 + if (check_heap_stack_gap(vma, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678 - if (!vma || addr <= vma->vm_start) {
1679 + if (check_heap_stack_gap(vma, addr - len, len)) {
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687 - if (likely(!vma || addr + len <= vma->vm_start)) {
1688 + if (check_heap_stack_gap(vma, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696 -
1697 -static inline unsigned long brk_rnd(void)
1698 -{
1699 - unsigned long rnd = get_random_int();
1700 -
1701 - rnd = rnd << PAGE_SHIFT;
1702 - /* 8MB for 32bit, 256MB for 64bit */
1703 - if (TASK_IS_32BIT_ADDR)
1704 - rnd = rnd & 0x7ffffful;
1705 - else
1706 - rnd = rnd & 0xffffffful;
1707 -
1708 - return rnd;
1709 -}
1710 -
1711 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1712 -{
1713 - unsigned long base = mm->brk;
1714 - unsigned long ret;
1715 -
1716 - ret = PAGE_ALIGN(base + brk_rnd());
1717 -
1718 - if (ret < mm->brk)
1719 - return mm->brk;
1720 -
1721 - return ret;
1722 -}
1723 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724 index 19f6cb1..6c78cf2 100644
1725 --- a/arch/parisc/include/asm/elf.h
1726 +++ b/arch/parisc/include/asm/elf.h
1727 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731 +#ifdef CONFIG_PAX_ASLR
1732 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733 +
1734 +#define PAX_DELTA_MMAP_LEN 16
1735 +#define PAX_DELTA_STACK_LEN 16
1736 +#endif
1737 +
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742 index 22dadeb..f6c2be4 100644
1743 --- a/arch/parisc/include/asm/pgtable.h
1744 +++ b/arch/parisc/include/asm/pgtable.h
1745 @@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749 +
1750 +#ifdef CONFIG_PAX_PAGEEXEC
1751 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754 +#else
1755 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756 +# define PAGE_COPY_NOEXEC PAGE_COPY
1757 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758 +#endif
1759 +
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764 index 5e34ccf..672bc9c 100644
1765 --- a/arch/parisc/kernel/module.c
1766 +++ b/arch/parisc/kernel/module.c
1767 @@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771 +static inline int in_init_rx(struct module *me, void *loc)
1772 +{
1773 + return (loc >= me->module_init_rx &&
1774 + loc < (me->module_init_rx + me->init_size_rx));
1775 +}
1776 +
1777 +static inline int in_init_rw(struct module *me, void *loc)
1778 +{
1779 + return (loc >= me->module_init_rw &&
1780 + loc < (me->module_init_rw + me->init_size_rw));
1781 +}
1782 +
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785 - return (loc >= me->module_init &&
1786 - loc <= (me->module_init + me->init_size));
1787 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1788 +}
1789 +
1790 +static inline int in_core_rx(struct module *me, void *loc)
1791 +{
1792 + return (loc >= me->module_core_rx &&
1793 + loc < (me->module_core_rx + me->core_size_rx));
1794 +}
1795 +
1796 +static inline int in_core_rw(struct module *me, void *loc)
1797 +{
1798 + return (loc >= me->module_core_rw &&
1799 + loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804 - return (loc >= me->module_core &&
1805 - loc <= (me->module_core + me->core_size));
1806 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814 - me->core_size = ALIGN(me->core_size, 16);
1815 - me->arch.got_offset = me->core_size;
1816 - me->core_size += gots * sizeof(struct got_entry);
1817 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818 + me->arch.got_offset = me->core_size_rw;
1819 + me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821 - me->core_size = ALIGN(me->core_size, 16);
1822 - me->arch.fdesc_offset = me->core_size;
1823 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1824 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825 + me->arch.fdesc_offset = me->core_size_rw;
1826 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834 - got = me->module_core + me->arch.got_offset;
1835 + got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867 index c9b9322..02d8940 100644
1868 --- a/arch/parisc/kernel/sys_parisc.c
1869 +++ b/arch/parisc/kernel/sys_parisc.c
1870 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874 - if (!vma || addr + len <= vma->vm_start)
1875 + if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883 - if (!vma || addr + len <= vma->vm_start)
1884 + if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892 - addr = TASK_UNMAPPED_BASE;
1893 + addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898 index f19e660..414fe24 100644
1899 --- a/arch/parisc/kernel/traps.c
1900 +++ b/arch/parisc/kernel/traps.c
1901 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1906 - && (vma->vm_flags & VM_EXEC)) {
1907 -
1908 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913 index 18162ce..94de376 100644
1914 --- a/arch/parisc/mm/fault.c
1915 +++ b/arch/parisc/mm/fault.c
1916 @@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920 +#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928 - if (code == 6 || code == 16)
1929 + if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937 +#ifdef CONFIG_PAX_PAGEEXEC
1938 +/*
1939 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940 + *
1941 + * returns 1 when task should be killed
1942 + * 2 when rt_sigreturn trampoline was detected
1943 + * 3 when unpatched PLT trampoline was detected
1944 + */
1945 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1946 +{
1947 +
1948 +#ifdef CONFIG_PAX_EMUPLT
1949 + int err;
1950 +
1951 + do { /* PaX: unpatched PLT emulation */
1952 + unsigned int bl, depwi;
1953 +
1954 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956 +
1957 + if (err)
1958 + break;
1959 +
1960 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962 +
1963 + err = get_user(ldw, (unsigned int *)addr);
1964 + err |= get_user(bv, (unsigned int *)(addr+4));
1965 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1966 +
1967 + if (err)
1968 + break;
1969 +
1970 + if (ldw == 0x0E801096U &&
1971 + bv == 0xEAC0C000U &&
1972 + ldw2 == 0x0E881095U)
1973 + {
1974 + unsigned int resolver, map;
1975 +
1976 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978 + if (err)
1979 + break;
1980 +
1981 + regs->gr[20] = instruction_pointer(regs)+8;
1982 + regs->gr[21] = map;
1983 + regs->gr[22] = resolver;
1984 + regs->iaoq[0] = resolver | 3UL;
1985 + regs->iaoq[1] = regs->iaoq[0] + 4;
1986 + return 3;
1987 + }
1988 + }
1989 + } while (0);
1990 +#endif
1991 +
1992 +#ifdef CONFIG_PAX_EMUTRAMP
1993 +
1994 +#ifndef CONFIG_PAX_EMUSIGRT
1995 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996 + return 1;
1997 +#endif
1998 +
1999 + do { /* PaX: rt_sigreturn emulation */
2000 + unsigned int ldi1, ldi2, bel, nop;
2001 +
2002 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006 +
2007 + if (err)
2008 + break;
2009 +
2010 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011 + ldi2 == 0x3414015AU &&
2012 + bel == 0xE4008200U &&
2013 + nop == 0x08000240U)
2014 + {
2015 + regs->gr[25] = (ldi1 & 2) >> 1;
2016 + regs->gr[20] = __NR_rt_sigreturn;
2017 + regs->gr[31] = regs->iaoq[1] + 16;
2018 + regs->sr[0] = regs->iasq[1];
2019 + regs->iaoq[0] = 0x100UL;
2020 + regs->iaoq[1] = regs->iaoq[0] + 4;
2021 + regs->iasq[0] = regs->sr[2];
2022 + regs->iasq[1] = regs->sr[2];
2023 + return 2;
2024 + }
2025 + } while (0);
2026 +#endif
2027 +
2028 + return 1;
2029 +}
2030 +
2031 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032 +{
2033 + unsigned long i;
2034 +
2035 + printk(KERN_ERR "PAX: bytes at PC: ");
2036 + for (i = 0; i < 5; i++) {
2037 + unsigned int c;
2038 + if (get_user(c, (unsigned int *)pc+i))
2039 + printk(KERN_CONT "???????? ");
2040 + else
2041 + printk(KERN_CONT "%08x ", c);
2042 + }
2043 + printk("\n");
2044 +}
2045 +#endif
2046 +
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050 @@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054 - if ((vma->vm_flags & acc_type) != acc_type)
2055 + if ((vma->vm_flags & acc_type) != acc_type) {
2056 +
2057 +#ifdef CONFIG_PAX_PAGEEXEC
2058 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059 + (address & ~3UL) == instruction_pointer(regs))
2060 + {
2061 + up_read(&mm->mmap_sem);
2062 + switch (pax_handle_fetch_fault(regs)) {
2063 +
2064 +#ifdef CONFIG_PAX_EMUPLT
2065 + case 3:
2066 + return;
2067 +#endif
2068 +
2069 +#ifdef CONFIG_PAX_EMUTRAMP
2070 + case 2:
2071 + return;
2072 +#endif
2073 +
2074 + }
2075 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076 + do_group_exit(SIGKILL);
2077 + }
2078 +#endif
2079 +
2080 goto bad_area;
2081 + }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086 index 3bf9cca..e7457d0 100644
2087 --- a/arch/powerpc/include/asm/elf.h
2088 +++ b/arch/powerpc/include/asm/elf.h
2089 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093 -extern unsigned long randomize_et_dyn(unsigned long base);
2094 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095 +#define ELF_ET_DYN_BASE (0x20000000)
2096 +
2097 +#ifdef CONFIG_PAX_ASLR
2098 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099 +
2100 +#ifdef __powerpc64__
2101 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103 +#else
2104 +#define PAX_DELTA_MMAP_LEN 15
2105 +#define PAX_DELTA_STACK_LEN 15
2106 +#endif
2107 +#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116 -#define arch_randomize_brk arch_randomize_brk
2117 -
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122 index bca8fdc..61e9580 100644
2123 --- a/arch/powerpc/include/asm/kmap_types.h
2124 +++ b/arch/powerpc/include/asm/kmap_types.h
2125 @@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129 + KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134 index d4a7f64..451de1c 100644
2135 --- a/arch/powerpc/include/asm/mman.h
2136 +++ b/arch/powerpc/include/asm/mman.h
2137 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147 index dd9c4fd..a2ced87 100644
2148 --- a/arch/powerpc/include/asm/page.h
2149 +++ b/arch/powerpc/include/asm/page.h
2150 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173 index fb40ede..d3ce956 100644
2174 --- a/arch/powerpc/include/asm/page_64.h
2175 +++ b/arch/powerpc/include/asm/page_64.h
2176 @@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182 +#define VM_STACK_DEFAULT_FLAGS32 \
2183 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189 +#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193 +#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198 index 88b0bd9..e32bc67 100644
2199 --- a/arch/powerpc/include/asm/pgtable.h
2200 +++ b/arch/powerpc/include/asm/pgtable.h
2201 @@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205 +#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210 index 4aad413..85d86bf 100644
2211 --- a/arch/powerpc/include/asm/pte-hash32.h
2212 +++ b/arch/powerpc/include/asm/pte-hash32.h
2213 @@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217 +#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222 index 559da19..7e5835c 100644
2223 --- a/arch/powerpc/include/asm/reg.h
2224 +++ b/arch/powerpc/include/asm/reg.h
2225 @@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234 index e30a13d..2b7d994 100644
2235 --- a/arch/powerpc/include/asm/system.h
2236 +++ b/arch/powerpc/include/asm/system.h
2237 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241 -extern unsigned long arch_align_stack(unsigned long sp);
2242 +#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247 index bd0fb84..a42a14b 100644
2248 --- a/arch/powerpc/include/asm/uaccess.h
2249 +++ b/arch/powerpc/include/asm/uaccess.h
2250 @@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255 +
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259 @@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263 -#ifndef __powerpc64__
2264 -
2265 -static inline unsigned long copy_from_user(void *to,
2266 - const void __user *from, unsigned long n)
2267 -{
2268 - unsigned long over;
2269 -
2270 - if (access_ok(VERIFY_READ, from, n))
2271 - return __copy_tofrom_user((__force void __user *)to, from, n);
2272 - if ((unsigned long)from < TASK_SIZE) {
2273 - over = (unsigned long)from + n - TASK_SIZE;
2274 - return __copy_tofrom_user((__force void __user *)to, from,
2275 - n - over) + over;
2276 - }
2277 - return n;
2278 -}
2279 -
2280 -static inline unsigned long copy_to_user(void __user *to,
2281 - const void *from, unsigned long n)
2282 -{
2283 - unsigned long over;
2284 -
2285 - if (access_ok(VERIFY_WRITE, to, n))
2286 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2287 - if ((unsigned long)to < TASK_SIZE) {
2288 - over = (unsigned long)to + n - TASK_SIZE;
2289 - return __copy_tofrom_user(to, (__force void __user *)from,
2290 - n - over) + over;
2291 - }
2292 - return n;
2293 -}
2294 -
2295 -#else /* __powerpc64__ */
2296 -
2297 -#define __copy_in_user(to, from, size) \
2298 - __copy_tofrom_user((to), (from), (size))
2299 -
2300 -extern unsigned long copy_from_user(void *to, const void __user *from,
2301 - unsigned long n);
2302 -extern unsigned long copy_to_user(void __user *to, const void *from,
2303 - unsigned long n);
2304 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305 - unsigned long n);
2306 -
2307 -#endif /* __powerpc64__ */
2308 -
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316 +
2317 + if (!__builtin_constant_p(n))
2318 + check_object_size(to, n, false);
2319 +
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327 +
2328 + if (!__builtin_constant_p(n))
2329 + check_object_size(from, n, true);
2330 +
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338 +#ifndef __powerpc64__
2339 +
2340 +static inline unsigned long __must_check copy_from_user(void *to,
2341 + const void __user *from, unsigned long n)
2342 +{
2343 + unsigned long over;
2344 +
2345 + if ((long)n < 0)
2346 + return n;
2347 +
2348 + if (access_ok(VERIFY_READ, from, n)) {
2349 + if (!__builtin_constant_p(n))
2350 + check_object_size(to, n, false);
2351 + return __copy_tofrom_user((__force void __user *)to, from, n);
2352 + }
2353 + if ((unsigned long)from < TASK_SIZE) {
2354 + over = (unsigned long)from + n - TASK_SIZE;
2355 + if (!__builtin_constant_p(n - over))
2356 + check_object_size(to, n - over, false);
2357 + return __copy_tofrom_user((__force void __user *)to, from,
2358 + n - over) + over;
2359 + }
2360 + return n;
2361 +}
2362 +
2363 +static inline unsigned long __must_check copy_to_user(void __user *to,
2364 + const void *from, unsigned long n)
2365 +{
2366 + unsigned long over;
2367 +
2368 + if ((long)n < 0)
2369 + return n;
2370 +
2371 + if (access_ok(VERIFY_WRITE, to, n)) {
2372 + if (!__builtin_constant_p(n))
2373 + check_object_size(from, n, true);
2374 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2375 + }
2376 + if ((unsigned long)to < TASK_SIZE) {
2377 + over = (unsigned long)to + n - TASK_SIZE;
2378 + if (!__builtin_constant_p(n))
2379 + check_object_size(from, n - over, true);
2380 + return __copy_tofrom_user(to, (__force void __user *)from,
2381 + n - over) + over;
2382 + }
2383 + return n;
2384 +}
2385 +
2386 +#else /* __powerpc64__ */
2387 +
2388 +#define __copy_in_user(to, from, size) \
2389 + __copy_tofrom_user((to), (from), (size))
2390 +
2391 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392 +{
2393 + if ((long)n < 0 || n > INT_MAX)
2394 + return n;
2395 +
2396 + if (!__builtin_constant_p(n))
2397 + check_object_size(to, n, false);
2398 +
2399 + if (likely(access_ok(VERIFY_READ, from, n)))
2400 + n = __copy_from_user(to, from, n);
2401 + else
2402 + memset(to, 0, n);
2403 + return n;
2404 +}
2405 +
2406 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407 +{
2408 + if ((long)n < 0 || n > INT_MAX)
2409 + return n;
2410 +
2411 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412 + if (!__builtin_constant_p(n))
2413 + check_object_size(from, n, true);
2414 + n = __copy_to_user(to, from, n);
2415 + }
2416 + return n;
2417 +}
2418 +
2419 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420 + unsigned long n);
2421 +
2422 +#endif /* __powerpc64__ */
2423 +
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428 index 429983c..7af363b 100644
2429 --- a/arch/powerpc/kernel/exceptions-64e.S
2430 +++ b/arch/powerpc/kernel/exceptions-64e.S
2431 @@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435 + bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439 @@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443 -1: bl .save_nvgprs
2444 - mr r5,r3
2445 +1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450 index cf9c69b..ebc9640 100644
2451 --- a/arch/powerpc/kernel/exceptions-64s.S
2452 +++ b/arch/powerpc/kernel/exceptions-64s.S
2453 @@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457 + bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461 - bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466 index 0b6d796..d760ddb 100644
2467 --- a/arch/powerpc/kernel/module_32.c
2468 +++ b/arch/powerpc/kernel/module_32.c
2469 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2474 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482 - if (location >= mod->module_core
2483 - && location < mod->module_core + mod->core_size)
2484 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487 - else
2488 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491 + else {
2492 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493 + return ~0UL;
2494 + }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499 index 6457574..08b28d3 100644
2500 --- a/arch/powerpc/kernel/process.c
2501 +++ b/arch/powerpc/kernel/process.c
2502 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521 - printk(" (%pS)",
2522 + printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539 -
2540 -unsigned long arch_align_stack(unsigned long sp)
2541 -{
2542 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543 - sp -= get_random_int() & ~PAGE_MASK;
2544 - return sp & ~0xf;
2545 -}
2546 -
2547 -static inline unsigned long brk_rnd(void)
2548 -{
2549 - unsigned long rnd = 0;
2550 -
2551 - /* 8MB for 32bit, 1GB for 64bit */
2552 - if (is_32bit_task())
2553 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554 - else
2555 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556 -
2557 - return rnd << PAGE_SHIFT;
2558 -}
2559 -
2560 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2561 -{
2562 - unsigned long base = mm->brk;
2563 - unsigned long ret;
2564 -
2565 -#ifdef CONFIG_PPC_STD_MMU_64
2566 - /*
2567 - * If we are using 1TB segments and we are allowed to randomise
2568 - * the heap, we can put it above 1TB so it is backed by a 1TB
2569 - * segment. Otherwise the heap will be in the bottom 1TB
2570 - * which always uses 256MB segments and this may result in a
2571 - * performance penalty.
2572 - */
2573 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575 -#endif
2576 -
2577 - ret = PAGE_ALIGN(base + brk_rnd());
2578 -
2579 - if (ret < mm->brk)
2580 - return mm->brk;
2581 -
2582 - return ret;
2583 -}
2584 -
2585 -unsigned long randomize_et_dyn(unsigned long base)
2586 -{
2587 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588 -
2589 - if (ret < base)
2590 - return base;
2591 -
2592 - return ret;
2593 -}
2594 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595 index 836a5a1..27289a3 100644
2596 --- a/arch/powerpc/kernel/signal_32.c
2597 +++ b/arch/powerpc/kernel/signal_32.c
2598 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608 index a50b5ec..547078a 100644
2609 --- a/arch/powerpc/kernel/signal_64.c
2610 +++ b/arch/powerpc/kernel/signal_64.c
2611 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621 index 5459d14..10f8070 100644
2622 --- a/arch/powerpc/kernel/traps.c
2623 +++ b/arch/powerpc/kernel/traps.c
2624 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628 +extern void gr_handle_kernel_exploit(void);
2629 +
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637 + gr_handle_kernel_exploit();
2638 +
2639 oops_exit();
2640 do_exit(err);
2641
2642 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643 index 7d14bb6..1305601 100644
2644 --- a/arch/powerpc/kernel/vdso.c
2645 +++ b/arch/powerpc/kernel/vdso.c
2646 @@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650 +#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658 - current->mm->context.vdso_base = 0;
2659 + current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667 - 0, 0);
2668 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673 index 5eea6f3..5d10396 100644
2674 --- a/arch/powerpc/lib/usercopy_64.c
2675 +++ b/arch/powerpc/lib/usercopy_64.c
2676 @@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681 -{
2682 - if (likely(access_ok(VERIFY_READ, from, n)))
2683 - n = __copy_from_user(to, from, n);
2684 - else
2685 - memset(to, 0, n);
2686 - return n;
2687 -}
2688 -
2689 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690 -{
2691 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2692 - n = __copy_to_user(to, from, n);
2693 - return n;
2694 -}
2695 -
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703 -EXPORT_SYMBOL(copy_from_user);
2704 -EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708 index 5efe8c9..db9ceef 100644
2709 --- a/arch/powerpc/mm/fault.c
2710 +++ b/arch/powerpc/mm/fault.c
2711 @@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715 +#include <linux/slab.h>
2716 +#include <linux/pagemap.h>
2717 +#include <linux/compiler.h>
2718 +#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722 @@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726 +#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734 +#ifdef CONFIG_PAX_PAGEEXEC
2735 +/*
2736 + * PaX: decide what to do with offenders (regs->nip = fault address)
2737 + *
2738 + * returns 1 when task should be killed
2739 + */
2740 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2741 +{
2742 + return 1;
2743 +}
2744 +
2745 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746 +{
2747 + unsigned long i;
2748 +
2749 + printk(KERN_ERR "PAX: bytes at PC: ");
2750 + for (i = 0; i < 5; i++) {
2751 + unsigned int c;
2752 + if (get_user(c, (unsigned int __user *)pc+i))
2753 + printk(KERN_CONT "???????? ");
2754 + else
2755 + printk(KERN_CONT "%08x ", c);
2756 + }
2757 + printk("\n");
2758 +}
2759 +#endif
2760 +
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768 - error_code &= 0x48200000;
2769 + error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773 @@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777 - if (error_code & 0x10000000)
2778 + if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782 @@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786 - if (error_code & DSISR_PROTFAULT)
2787 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791 @@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795 +
2796 +#ifdef CONFIG_PAX_PAGEEXEC
2797 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798 +#ifdef CONFIG_PPC_STD_MMU
2799 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800 +#else
2801 + if (is_exec && regs->nip == address) {
2802 +#endif
2803 + switch (pax_handle_fetch_fault(regs)) {
2804 + }
2805 +
2806 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807 + do_group_exit(SIGKILL);
2808 + }
2809 + }
2810 +#endif
2811 +
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816 index 5a783d8..c23e14b 100644
2817 --- a/arch/powerpc/mm/mmap_64.c
2818 +++ b/arch/powerpc/mm/mmap_64.c
2819 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823 +
2824 +#ifdef CONFIG_PAX_RANDMMAP
2825 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2826 + mm->mmap_base += mm->delta_mmap;
2827 +#endif
2828 +
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833 +
2834 +#ifdef CONFIG_PAX_RANDMMAP
2835 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2836 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837 +#endif
2838 +
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843 index 73709f7..6b90313 100644
2844 --- a/arch/powerpc/mm/slice.c
2845 +++ b/arch/powerpc/mm/slice.c
2846 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850 - return (!vma || (addr + len) <= vma->vm_start);
2851 + return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855 @@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859 - if (!vma || addr + len <= vma->vm_start) {
2860 + if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868 - addr = mm->mmap_base;
2869 - while (addr > len) {
2870 + if (mm->mmap_base < len)
2871 + addr = -ENOMEM;
2872 + else
2873 + addr = mm->mmap_base - len;
2874 +
2875 + while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886 - if (!vma || (addr + len) <= vma->vm_start) {
2887 + if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895 - addr = vma->vm_start;
2896 + addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904 +#ifdef CONFIG_PAX_RANDMMAP
2905 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906 + addr = 0;
2907 +#endif
2908 +
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913 index 547f1a6..3fff354 100644
2914 --- a/arch/s390/include/asm/elf.h
2915 +++ b/arch/s390/include/asm/elf.h
2916 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920 -extern unsigned long randomize_et_dyn(unsigned long base);
2921 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923 +
2924 +#ifdef CONFIG_PAX_ASLR
2925 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926 +
2927 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2928 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929 +#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933 @@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938 -#define arch_randomize_brk arch_randomize_brk
2939 -
2940 #endif
2941 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942 index ef573c1..75a1ce6 100644
2943 --- a/arch/s390/include/asm/system.h
2944 +++ b/arch/s390/include/asm/system.h
2945 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949 -extern unsigned long arch_align_stack(unsigned long sp);
2950 +#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955 index 2b23885..e136e31 100644
2956 --- a/arch/s390/include/asm/uaccess.h
2957 +++ b/arch/s390/include/asm/uaccess.h
2958 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962 +
2963 + if ((long)n < 0)
2964 + return n;
2965 +
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973 + if ((long)n < 0)
2974 + return n;
2975 +
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983 +
2984 + if ((long)n < 0)
2985 + return n;
2986 +
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991 index dfcb343..eda788a 100644
2992 --- a/arch/s390/kernel/module.c
2993 +++ b/arch/s390/kernel/module.c
2994 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998 - me->core_size = ALIGN(me->core_size, 4);
2999 - me->arch.got_offset = me->core_size;
3000 - me->core_size += me->arch.got_size;
3001 - me->arch.plt_offset = me->core_size;
3002 - me->core_size += me->arch.plt_size;
3003 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004 + me->arch.got_offset = me->core_size_rw;
3005 + me->core_size_rw += me->arch.got_size;
3006 + me->arch.plt_offset = me->core_size_rx;
3007 + me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015 - gotent = me->module_core + me->arch.got_offset +
3016 + gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3025 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033 - ip = me->module_core + me->arch.plt_offset +
3034 + ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042 - val = (Elf_Addr) me->module_core +
3043 + val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3052 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066 index 9451b21..ed8956f 100644
3067 --- a/arch/s390/kernel/process.c
3068 +++ b/arch/s390/kernel/process.c
3069 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073 -
3074 -unsigned long arch_align_stack(unsigned long sp)
3075 -{
3076 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077 - sp -= get_random_int() & ~PAGE_MASK;
3078 - return sp & ~0xf;
3079 -}
3080 -
3081 -static inline unsigned long brk_rnd(void)
3082 -{
3083 - /* 8MB for 32bit, 1GB for 64bit */
3084 - if (is_32bit_task())
3085 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086 - else
3087 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088 -}
3089 -
3090 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3091 -{
3092 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093 -
3094 - if (ret < mm->brk)
3095 - return mm->brk;
3096 - return ret;
3097 -}
3098 -
3099 -unsigned long randomize_et_dyn(unsigned long base)
3100 -{
3101 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102 -
3103 - if (!(current->flags & PF_RANDOMIZE))
3104 - return base;
3105 - if (ret < base)
3106 - return base;
3107 - return ret;
3108 -}
3109 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110 index f09c748..cf9ec1d 100644
3111 --- a/arch/s390/mm/mmap.c
3112 +++ b/arch/s390/mm/mmap.c
3113 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117 +
3118 +#ifdef CONFIG_PAX_RANDMMAP
3119 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3120 + mm->mmap_base += mm->delta_mmap;
3121 +#endif
3122 +
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127 +
3128 +#ifdef CONFIG_PAX_RANDMMAP
3129 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3130 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131 +#endif
3132 +
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140 +
3141 +#ifdef CONFIG_PAX_RANDMMAP
3142 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3143 + mm->mmap_base += mm->delta_mmap;
3144 +#endif
3145 +
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150 +
3151 +#ifdef CONFIG_PAX_RANDMMAP
3152 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3153 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154 +#endif
3155 +
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160 index 589d5c7..669e274 100644
3161 --- a/arch/score/include/asm/system.h
3162 +++ b/arch/score/include/asm/system.h
3163 @@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167 -extern unsigned long arch_align_stack(unsigned long sp);
3168 +#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173 index 25d0803..d6c8e36 100644
3174 --- a/arch/score/kernel/process.c
3175 +++ b/arch/score/kernel/process.c
3176 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180 -
3181 -unsigned long arch_align_stack(unsigned long sp)
3182 -{
3183 - return sp;
3184 -}
3185 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186 index afeb710..d1d1289 100644
3187 --- a/arch/sh/mm/mmap.c
3188 +++ b/arch/sh/mm/mmap.c
3189 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193 - if (TASK_SIZE - len >= addr &&
3194 - (!vma || addr + len <= vma->vm_start))
3195 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199 @@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203 - if (likely(!vma || addr + len <= vma->vm_start)) {
3204 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212 - if (TASK_SIZE - len >= addr &&
3213 - (!vma || addr + len <= vma->vm_start))
3214 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222 - if (!vma || addr <= vma->vm_start) {
3223 + if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231 - addr = mm->mmap_base-len;
3232 - if (do_colour_align)
3233 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234 + addr = mm->mmap_base - len;
3235
3236 do {
3237 + if (do_colour_align)
3238 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245 - if (likely(!vma || addr+len <= vma->vm_start)) {
3246 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254 - addr = vma->vm_start-len;
3255 - if (do_colour_align)
3256 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257 - } while (likely(len < vma->vm_start));
3258 + addr = skip_heap_stack_gap(vma, len);
3259 + } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264 index ad1fb5d..fc5315b 100644
3265 --- a/arch/sparc/Makefile
3266 +++ b/arch/sparc/Makefile
3267 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277 index 9f421df..b81fc12 100644
3278 --- a/arch/sparc/include/asm/atomic_64.h
3279 +++ b/arch/sparc/include/asm/atomic_64.h
3280 @@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285 +{
3286 + return v->counter;
3287 +}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290 +{
3291 + return v->counter;
3292 +}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296 +{
3297 + v->counter = i;
3298 +}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301 +{
3302 + v->counter = i;
3303 +}
3304
3305 extern void atomic_add(int, atomic_t *);
3306 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326 +{
3327 + return atomic_add_ret_unchecked(1, v);
3328 +}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331 +{
3332 + return atomic64_add_ret_unchecked(1, v);
3333 +}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340 +{
3341 + return atomic_add_ret_unchecked(i, v);
3342 +}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345 +{
3346 + return atomic64_add_ret_unchecked(i, v);
3347 +}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356 +{
3357 + return atomic_inc_return_unchecked(v) == 0;
3358 +}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367 +{
3368 + atomic_add_unchecked(1, v);
3369 +}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372 +{
3373 + atomic64_add_unchecked(1, v);
3374 +}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378 +{
3379 + atomic_sub_unchecked(1, v);
3380 +}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383 +{
3384 + atomic64_sub_unchecked(1, v);
3385 +}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392 +{
3393 + return cmpxchg(&v->counter, old, new);
3394 +}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397 +{
3398 + return xchg(&v->counter, new);
3399 +}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403 - int c, old;
3404 + int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407 - if (unlikely(c == (u)))
3408 + if (unlikely(c == u))
3409 break;
3410 - old = atomic_cmpxchg((v), c, c + (a));
3411 +
3412 + asm volatile("addcc %2, %0, %0\n"
3413 +
3414 +#ifdef CONFIG_PAX_REFCOUNT
3415 + "tvs %%icc, 6\n"
3416 +#endif
3417 +
3418 + : "=r" (new)
3419 + : "0" (c), "ir" (a)
3420 + : "cc");
3421 +
3422 + old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431 +{
3432 + return xchg(&v->counter, new);
3433 +}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437 - long c, old;
3438 + long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441 - if (unlikely(c == (u)))
3442 + if (unlikely(c == u))
3443 break;
3444 - old = atomic64_cmpxchg((v), c, c + (a));
3445 +
3446 + asm volatile("addcc %2, %0, %0\n"
3447 +
3448 +#ifdef CONFIG_PAX_REFCOUNT
3449 + "tvs %%xcc, 6\n"
3450 +#endif
3451 +
3452 + : "=r" (new)
3453 + : "0" (c), "ir" (a)
3454 + : "cc");
3455 +
3456 + old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461 - return c != (u);
3462 + return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467 index 69358b5..17b4745 100644
3468 --- a/arch/sparc/include/asm/cache.h
3469 +++ b/arch/sparc/include/asm/cache.h
3470 @@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474 -#define L1_CACHE_BYTES 32
3475 +#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480 index 4269ca6..e3da77f 100644
3481 --- a/arch/sparc/include/asm/elf_32.h
3482 +++ b/arch/sparc/include/asm/elf_32.h
3483 @@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487 +#ifdef CONFIG_PAX_ASLR
3488 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489 +
3490 +#define PAX_DELTA_MMAP_LEN 16
3491 +#define PAX_DELTA_STACK_LEN 16
3492 +#endif
3493 +
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498 index 7df8b7f..4946269 100644
3499 --- a/arch/sparc/include/asm/elf_64.h
3500 +++ b/arch/sparc/include/asm/elf_64.h
3501 @@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505 +#ifdef CONFIG_PAX_ASLR
3506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507 +
3508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510 +#endif
3511 +
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516 index a790cc6..091ed94 100644
3517 --- a/arch/sparc/include/asm/pgtable_32.h
3518 +++ b/arch/sparc/include/asm/pgtable_32.h
3519 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523 +
3524 +#ifdef CONFIG_PAX_PAGEEXEC
3525 +BTFIXUPDEF_INT(page_shared_noexec)
3526 +BTFIXUPDEF_INT(page_copy_noexec)
3527 +BTFIXUPDEF_INT(page_readonly_noexec)
3528 +#endif
3529 +
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537 +#ifdef CONFIG_PAX_PAGEEXEC
3538 +extern pgprot_t PAGE_SHARED_NOEXEC;
3539 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541 +#else
3542 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543 +# define PAGE_COPY_NOEXEC PAGE_COPY
3544 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545 +#endif
3546 +
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551 index f6ae2b2..b03ffc7 100644
3552 --- a/arch/sparc/include/asm/pgtsrmmu.h
3553 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3554 @@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558 +
3559 +#ifdef CONFIG_PAX_PAGEEXEC
3560 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563 +#endif
3564 +
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569 index 9689176..63c18ea 100644
3570 --- a/arch/sparc/include/asm/spinlock_64.h
3571 +++ b/arch/sparc/include/asm/spinlock_64.h
3572 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576 -static void inline arch_read_lock(arch_rwlock_t *lock)
3577 +static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584 -"4: add %0, 1, %1\n"
3585 +"4: addcc %0, 1, %1\n"
3586 +
3587 +#ifdef CONFIG_PAX_REFCOUNT
3588 +" tvs %%icc, 6\n"
3589 +#endif
3590 +
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598 - : "memory");
3599 + : "memory", "cc");
3600 }
3601
3602 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3603 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611 -" add %0, 1, %1\n"
3612 +" addcc %0, 1, %1\n"
3613 +
3614 +#ifdef CONFIG_PAX_REFCOUNT
3615 +" tvs %%icc, 6\n"
3616 +#endif
3617 +
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3626 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632 -" sub %0, 1, %1\n"
3633 +" subcc %0, 1, %1\n"
3634 +
3635 +#ifdef CONFIG_PAX_REFCOUNT
3636 +" tvs %%icc, 6\n"
3637 +#endif
3638 +
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646 -static void inline arch_write_lock(arch_rwlock_t *lock)
3647 +static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3656 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3665 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670 index fa57532..e1a4c53 100644
3671 --- a/arch/sparc/include/asm/thread_info_32.h
3672 +++ b/arch/sparc/include/asm/thread_info_32.h
3673 @@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677 +
3678 + unsigned long lowest_stack;
3679 };
3680
3681 /*
3682 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683 index 60d86be..952dea1 100644
3684 --- a/arch/sparc/include/asm/thread_info_64.h
3685 +++ b/arch/sparc/include/asm/thread_info_64.h
3686 @@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690 + unsigned long lowest_stack;
3691 +
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696 index e88fbe5..96b0ce5 100644
3697 --- a/arch/sparc/include/asm/uaccess.h
3698 +++ b/arch/sparc/include/asm/uaccess.h
3699 @@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702 +
3703 +#ifdef __KERNEL__
3704 +#ifndef __ASSEMBLY__
3705 +#include <linux/types.h>
3706 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707 +#endif
3708 +#endif
3709 +
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714 index 8303ac4..07f333d 100644
3715 --- a/arch/sparc/include/asm/uaccess_32.h
3716 +++ b/arch/sparc/include/asm/uaccess_32.h
3717 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721 - if (n && __access_ok((unsigned long) to, n))
3722 + if ((long)n < 0)
3723 + return n;
3724 +
3725 + if (n && __access_ok((unsigned long) to, n)) {
3726 + if (!__builtin_constant_p(n))
3727 + check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729 - else
3730 + } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736 + if ((long)n < 0)
3737 + return n;
3738 +
3739 + if (!__builtin_constant_p(n))
3740 + check_object_size(from, n, true);
3741 +
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747 - if (n && __access_ok((unsigned long) from, n))
3748 + if ((long)n < 0)
3749 + return n;
3750 +
3751 + if (n && __access_ok((unsigned long) from, n)) {
3752 + if (!__builtin_constant_p(n))
3753 + check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755 - else
3756 + } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762 + if ((long)n < 0)
3763 + return n;
3764 +
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769 index 3e1449f..5293a0e 100644
3770 --- a/arch/sparc/include/asm/uaccess_64.h
3771 +++ b/arch/sparc/include/asm/uaccess_64.h
3772 @@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776 +#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784 - unsigned long ret = ___copy_from_user(to, from, size);
3785 + unsigned long ret;
3786
3787 + if ((long)size < 0 || size > INT_MAX)
3788 + return size;
3789 +
3790 + if (!__builtin_constant_p(size))
3791 + check_object_size(to, size, false);
3792 +
3793 + ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801 - unsigned long ret = ___copy_to_user(to, from, size);
3802 + unsigned long ret;
3803
3804 + if ((long)size < 0 || size > INT_MAX)
3805 + return size;
3806 +
3807 + if (!__builtin_constant_p(size))
3808 + check_object_size(from, size, true);
3809 +
3810 + ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815 index cb85458..e063f17 100644
3816 --- a/arch/sparc/kernel/Makefile
3817 +++ b/arch/sparc/kernel/Makefile
3818 @@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822 -ccflags-y := -Werror
3823 +#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828 index f793742..4d880af 100644
3829 --- a/arch/sparc/kernel/process_32.c
3830 +++ b/arch/sparc/kernel/process_32.c
3831 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835 - printk("%pS\n", (void *) rw->ins[7]);
3836 + printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844 - printk("PC: <%pS>\n", (void *) r->pc);
3845 + printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861 - printk("%pS ] ", (void *) pc);
3862 + printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867 index 3739a06..48b2ff0 100644
3868 --- a/arch/sparc/kernel/process_64.c
3869 +++ b/arch/sparc/kernel/process_64.c
3870 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3883 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906 index 42b282f..28ce9f2 100644
3907 --- a/arch/sparc/kernel/sys_sparc_32.c
3908 +++ b/arch/sparc/kernel/sys_sparc_32.c
3909 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913 - addr = TASK_UNMAPPED_BASE;
3914 + addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922 - if (!vmm || addr + len <= vmm->vm_start)
3923 + if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928 index 441521a..b767073 100644
3929 --- a/arch/sparc/kernel/sys_sparc_64.c
3930 +++ b/arch/sparc/kernel/sys_sparc_64.c
3931 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935 - if ((flags & MAP_SHARED) &&
3936 + if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944 +#ifdef CONFIG_PAX_RANDMMAP
3945 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946 +#endif
3947 +
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955 - if (task_size - len >= addr &&
3956 - (!vma || addr + len <= vma->vm_start))
3957 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962 - start_addr = addr = mm->free_area_cache;
3963 + start_addr = addr = mm->free_area_cache;
3964 } else {
3965 - start_addr = addr = TASK_UNMAPPED_BASE;
3966 + start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970 @@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974 - if (start_addr != TASK_UNMAPPED_BASE) {
3975 - start_addr = addr = TASK_UNMAPPED_BASE;
3976 + if (start_addr != mm->mmap_base) {
3977 + start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983 - if (likely(!vma || addr + len <= vma->vm_start)) {
3984 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992 - if ((flags & MAP_SHARED) &&
3993 + if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001 - if (task_size - len >= addr &&
4002 - (!vma || addr + len <= vma->vm_start))
4003 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011 - if (!vma || addr <= vma->vm_start) {
4012 + if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020 - addr = mm->mmap_base-len;
4021 - if (do_color_align)
4022 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023 + addr = mm->mmap_base - len;
4024
4025 do {
4026 + if (do_color_align)
4027 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034 - if (likely(!vma || addr+len <= vma->vm_start)) {
4035 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043 - addr = vma->vm_start-len;
4044 - if (do_color_align)
4045 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046 - } while (likely(len < vma->vm_start));
4047 + addr = skip_heap_stack_gap(vma, len);
4048 + } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056 +
4057 +#ifdef CONFIG_PAX_RANDMMAP
4058 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4059 + mm->mmap_base += mm->delta_mmap;
4060 +#endif
4061 +
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069 +
4070 +#ifdef CONFIG_PAX_RANDMMAP
4071 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4072 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073 +#endif
4074 +
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079 index 591f20c..0f1b925 100644
4080 --- a/arch/sparc/kernel/traps_32.c
4081 +++ b/arch/sparc/kernel/traps_32.c
4082 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086 +extern void gr_handle_kernel_exploit(void);
4087 +
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103 - if(regs->psr & PSR_PS)
4104 + if(regs->psr & PSR_PS) {
4105 + gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107 + }
4108 do_exit(SIGSEGV);
4109 }
4110
4111 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112 index 0cbdaa4..438e4c9 100644
4113 --- a/arch/sparc/kernel/traps_64.c
4114 +++ b/arch/sparc/kernel/traps_64.c
4115 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128 +
4129 +#ifdef CONFIG_PAX_REFCOUNT
4130 + if (lvl == 6)
4131 + pax_report_refcount_overflow(regs);
4132 +#endif
4133 +
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141 -
4142 +
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147 +#ifdef CONFIG_PAX_REFCOUNT
4148 + if (lvl == 6)
4149 + pax_report_refcount_overflow(regs);
4150 +#endif
4151 +
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159 - printk("TPC<%pS>\n", (void *) regs->tpc);
4160 + printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4211 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4218 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226 +extern void gr_handle_kernel_exploit(void);
4227 +
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244 - if (regs->tstate & TSTATE_PRIV)
4245 + if (regs->tstate & TSTATE_PRIV) {
4246 + gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248 + }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253 index 76e4ac1..78f8bb1 100644
4254 --- a/arch/sparc/kernel/unaligned_64.c
4255 +++ b/arch/sparc/kernel/unaligned_64.c
4256 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266 index a3fc437..fea9957 100644
4267 --- a/arch/sparc/lib/Makefile
4268 +++ b/arch/sparc/lib/Makefile
4269 @@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273 -ccflags-y := -Werror
4274 +#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279 index 59186e0..f747d7a 100644
4280 --- a/arch/sparc/lib/atomic_64.S
4281 +++ b/arch/sparc/lib/atomic_64.S
4282 @@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286 - add %g1, %o0, %g7
4287 + addcc %g1, %o0, %g7
4288 +
4289 +#ifdef CONFIG_PAX_REFCOUNT
4290 + tvs %icc, 6
4291 +#endif
4292 +
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300 + .globl atomic_add_unchecked
4301 + .type atomic_add_unchecked,#function
4302 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303 + BACKOFF_SETUP(%o2)
4304 +1: lduw [%o1], %g1
4305 + add %g1, %o0, %g7
4306 + cas [%o1], %g1, %g7
4307 + cmp %g1, %g7
4308 + bne,pn %icc, 2f
4309 + nop
4310 + retl
4311 + nop
4312 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4313 + .size atomic_add_unchecked, .-atomic_add_unchecked
4314 +
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320 - sub %g1, %o0, %g7
4321 + subcc %g1, %o0, %g7
4322 +
4323 +#ifdef CONFIG_PAX_REFCOUNT
4324 + tvs %icc, 6
4325 +#endif
4326 +
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334 + .globl atomic_sub_unchecked
4335 + .type atomic_sub_unchecked,#function
4336 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337 + BACKOFF_SETUP(%o2)
4338 +1: lduw [%o1], %g1
4339 + sub %g1, %o0, %g7
4340 + cas [%o1], %g1, %g7
4341 + cmp %g1, %g7
4342 + bne,pn %icc, 2f
4343 + nop
4344 + retl
4345 + nop
4346 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4347 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348 +
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354 - add %g1, %o0, %g7
4355 + addcc %g1, %o0, %g7
4356 +
4357 +#ifdef CONFIG_PAX_REFCOUNT
4358 + tvs %icc, 6
4359 +#endif
4360 +
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368 + .globl atomic_add_ret_unchecked
4369 + .type atomic_add_ret_unchecked,#function
4370 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371 + BACKOFF_SETUP(%o2)
4372 +1: lduw [%o1], %g1
4373 + addcc %g1, %o0, %g7
4374 + cas [%o1], %g1, %g7
4375 + cmp %g1, %g7
4376 + bne,pn %icc, 2f
4377 + add %g7, %o0, %g7
4378 + sra %g7, 0, %o0
4379 + retl
4380 + nop
4381 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4382 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383 +
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389 - sub %g1, %o0, %g7
4390 + subcc %g1, %o0, %g7
4391 +
4392 +#ifdef CONFIG_PAX_REFCOUNT
4393 + tvs %icc, 6
4394 +#endif
4395 +
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403 - add %g1, %o0, %g7
4404 + addcc %g1, %o0, %g7
4405 +
4406 +#ifdef CONFIG_PAX_REFCOUNT
4407 + tvs %xcc, 6
4408 +#endif
4409 +
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417 + .globl atomic64_add_unchecked
4418 + .type atomic64_add_unchecked,#function
4419 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420 + BACKOFF_SETUP(%o2)
4421 +1: ldx [%o1], %g1
4422 + addcc %g1, %o0, %g7
4423 + casx [%o1], %g1, %g7
4424 + cmp %g1, %g7
4425 + bne,pn %xcc, 2f
4426 + nop
4427 + retl
4428 + nop
4429 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4430 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431 +
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437 - sub %g1, %o0, %g7
4438 + subcc %g1, %o0, %g7
4439 +
4440 +#ifdef CONFIG_PAX_REFCOUNT
4441 + tvs %xcc, 6
4442 +#endif
4443 +
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451 + .globl atomic64_sub_unchecked
4452 + .type atomic64_sub_unchecked,#function
4453 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454 + BACKOFF_SETUP(%o2)
4455 +1: ldx [%o1], %g1
4456 + subcc %g1, %o0, %g7
4457 + casx [%o1], %g1, %g7
4458 + cmp %g1, %g7
4459 + bne,pn %xcc, 2f
4460 + nop
4461 + retl
4462 + nop
4463 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4464 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465 +
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471 - add %g1, %o0, %g7
4472 + addcc %g1, %o0, %g7
4473 +
4474 +#ifdef CONFIG_PAX_REFCOUNT
4475 + tvs %xcc, 6
4476 +#endif
4477 +
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485 + .globl atomic64_add_ret_unchecked
4486 + .type atomic64_add_ret_unchecked,#function
4487 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488 + BACKOFF_SETUP(%o2)
4489 +1: ldx [%o1], %g1
4490 + addcc %g1, %o0, %g7
4491 + casx [%o1], %g1, %g7
4492 + cmp %g1, %g7
4493 + bne,pn %xcc, 2f
4494 + add %g7, %o0, %g7
4495 + mov %g7, %o0
4496 + retl
4497 + nop
4498 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4499 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500 +
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506 - sub %g1, %o0, %g7
4507 + subcc %g1, %o0, %g7
4508 +
4509 +#ifdef CONFIG_PAX_REFCOUNT
4510 + tvs %xcc, 6
4511 +#endif
4512 +
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517 index 1b30bb3..b4a16c7 100644
4518 --- a/arch/sparc/lib/ksyms.c
4519 +++ b/arch/sparc/lib/ksyms.c
4520 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524 +EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528 +EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531 +EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540 index 301421c..e2535d1 100644
4541 --- a/arch/sparc/mm/Makefile
4542 +++ b/arch/sparc/mm/Makefile
4543 @@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547 -ccflags-y := -Werror
4548 +#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553 index 8023fd7..c8e89e9 100644
4554 --- a/arch/sparc/mm/fault_32.c
4555 +++ b/arch/sparc/mm/fault_32.c
4556 @@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560 +#include <linux/slab.h>
4561 +#include <linux/pagemap.h>
4562 +#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570 +#ifdef CONFIG_PAX_PAGEEXEC
4571 +#ifdef CONFIG_PAX_DLRESOLVE
4572 +static void pax_emuplt_close(struct vm_area_struct *vma)
4573 +{
4574 + vma->vm_mm->call_dl_resolve = 0UL;
4575 +}
4576 +
4577 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578 +{
4579 + unsigned int *kaddr;
4580 +
4581 + vmf->page = alloc_page(GFP_HIGHUSER);
4582 + if (!vmf->page)
4583 + return VM_FAULT_OOM;
4584 +
4585 + kaddr = kmap(vmf->page);
4586 + memset(kaddr, 0, PAGE_SIZE);
4587 + kaddr[0] = 0x9DE3BFA8U; /* save */
4588 + flush_dcache_page(vmf->page);
4589 + kunmap(vmf->page);
4590 + return VM_FAULT_MAJOR;
4591 +}
4592 +
4593 +static const struct vm_operations_struct pax_vm_ops = {
4594 + .close = pax_emuplt_close,
4595 + .fault = pax_emuplt_fault
4596 +};
4597 +
4598 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599 +{
4600 + int ret;
4601 +
4602 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4603 + vma->vm_mm = current->mm;
4604 + vma->vm_start = addr;
4605 + vma->vm_end = addr + PAGE_SIZE;
4606 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608 + vma->vm_ops = &pax_vm_ops;
4609 +
4610 + ret = insert_vm_struct(current->mm, vma);
4611 + if (ret)
4612 + return ret;
4613 +
4614 + ++current->mm->total_vm;
4615 + return 0;
4616 +}
4617 +#endif
4618 +
4619 +/*
4620 + * PaX: decide what to do with offenders (regs->pc = fault address)
4621 + *
4622 + * returns 1 when task should be killed
4623 + * 2 when patched PLT trampoline was detected
4624 + * 3 when unpatched PLT trampoline was detected
4625 + */
4626 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4627 +{
4628 +
4629 +#ifdef CONFIG_PAX_EMUPLT
4630 + int err;
4631 +
4632 + do { /* PaX: patched PLT emulation #1 */
4633 + unsigned int sethi1, sethi2, jmpl;
4634 +
4635 + err = get_user(sethi1, (unsigned int *)regs->pc);
4636 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638 +
4639 + if (err)
4640 + break;
4641 +
4642 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645 + {
4646 + unsigned int addr;
4647 +
4648 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649 + addr = regs->u_regs[UREG_G1];
4650 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651 + regs->pc = addr;
4652 + regs->npc = addr+4;
4653 + return 2;
4654 + }
4655 + } while (0);
4656 +
4657 + { /* PaX: patched PLT emulation #2 */
4658 + unsigned int ba;
4659 +
4660 + err = get_user(ba, (unsigned int *)regs->pc);
4661 +
4662 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663 + unsigned int addr;
4664 +
4665 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666 + regs->pc = addr;
4667 + regs->npc = addr+4;
4668 + return 2;
4669 + }
4670 + }
4671 +
4672 + do { /* PaX: patched PLT emulation #3 */
4673 + unsigned int sethi, jmpl, nop;
4674 +
4675 + err = get_user(sethi, (unsigned int *)regs->pc);
4676 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678 +
4679 + if (err)
4680 + break;
4681 +
4682 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684 + nop == 0x01000000U)
4685 + {
4686 + unsigned int addr;
4687 +
4688 + addr = (sethi & 0x003FFFFFU) << 10;
4689 + regs->u_regs[UREG_G1] = addr;
4690 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691 + regs->pc = addr;
4692 + regs->npc = addr+4;
4693 + return 2;
4694 + }
4695 + } while (0);
4696 +
4697 + do { /* PaX: unpatched PLT emulation step 1 */
4698 + unsigned int sethi, ba, nop;
4699 +
4700 + err = get_user(sethi, (unsigned int *)regs->pc);
4701 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703 +
4704 + if (err)
4705 + break;
4706 +
4707 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709 + nop == 0x01000000U)
4710 + {
4711 + unsigned int addr, save, call;
4712 +
4713 + if ((ba & 0xFFC00000U) == 0x30800000U)
4714 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715 + else
4716 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717 +
4718 + err = get_user(save, (unsigned int *)addr);
4719 + err |= get_user(call, (unsigned int *)(addr+4));
4720 + err |= get_user(nop, (unsigned int *)(addr+8));
4721 + if (err)
4722 + break;
4723 +
4724 +#ifdef CONFIG_PAX_DLRESOLVE
4725 + if (save == 0x9DE3BFA8U &&
4726 + (call & 0xC0000000U) == 0x40000000U &&
4727 + nop == 0x01000000U)
4728 + {
4729 + struct vm_area_struct *vma;
4730 + unsigned long call_dl_resolve;
4731 +
4732 + down_read(&current->mm->mmap_sem);
4733 + call_dl_resolve = current->mm->call_dl_resolve;
4734 + up_read(&current->mm->mmap_sem);
4735 + if (likely(call_dl_resolve))
4736 + goto emulate;
4737 +
4738 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739 +
4740 + down_write(&current->mm->mmap_sem);
4741 + if (current->mm->call_dl_resolve) {
4742 + call_dl_resolve = current->mm->call_dl_resolve;
4743 + up_write(&current->mm->mmap_sem);
4744 + if (vma)
4745 + kmem_cache_free(vm_area_cachep, vma);
4746 + goto emulate;
4747 + }
4748 +
4749 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751 + up_write(&current->mm->mmap_sem);
4752 + if (vma)
4753 + kmem_cache_free(vm_area_cachep, vma);
4754 + return 1;
4755 + }
4756 +
4757 + if (pax_insert_vma(vma, call_dl_resolve)) {
4758 + up_write(&current->mm->mmap_sem);
4759 + kmem_cache_free(vm_area_cachep, vma);
4760 + return 1;
4761 + }
4762 +
4763 + current->mm->call_dl_resolve = call_dl_resolve;
4764 + up_write(&current->mm->mmap_sem);
4765 +
4766 +emulate:
4767 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768 + regs->pc = call_dl_resolve;
4769 + regs->npc = addr+4;
4770 + return 3;
4771 + }
4772 +#endif
4773 +
4774 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775 + if ((save & 0xFFC00000U) == 0x05000000U &&
4776 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4777 + nop == 0x01000000U)
4778 + {
4779 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780 + regs->u_regs[UREG_G2] = addr + 4;
4781 + addr = (save & 0x003FFFFFU) << 10;
4782 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783 + regs->pc = addr;
4784 + regs->npc = addr+4;
4785 + return 3;
4786 + }
4787 + }
4788 + } while (0);
4789 +
4790 + do { /* PaX: unpatched PLT emulation step 2 */
4791 + unsigned int save, call, nop;
4792 +
4793 + err = get_user(save, (unsigned int *)(regs->pc-4));
4794 + err |= get_user(call, (unsigned int *)regs->pc);
4795 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796 + if (err)
4797 + break;
4798 +
4799 + if (save == 0x9DE3BFA8U &&
4800 + (call & 0xC0000000U) == 0x40000000U &&
4801 + nop == 0x01000000U)
4802 + {
4803 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804 +
4805 + regs->u_regs[UREG_RETPC] = regs->pc;
4806 + regs->pc = dl_resolve;
4807 + regs->npc = dl_resolve+4;
4808 + return 3;
4809 + }
4810 + } while (0);
4811 +#endif
4812 +
4813 + return 1;
4814 +}
4815 +
4816 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817 +{
4818 + unsigned long i;
4819 +
4820 + printk(KERN_ERR "PAX: bytes at PC: ");
4821 + for (i = 0; i < 8; i++) {
4822 + unsigned int c;
4823 + if (get_user(c, (unsigned int *)pc+i))
4824 + printk(KERN_CONT "???????? ");
4825 + else
4826 + printk(KERN_CONT "%08x ", c);
4827 + }
4828 + printk("\n");
4829 +}
4830 +#endif
4831 +
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835 @@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839 +
4840 +#ifdef CONFIG_PAX_PAGEEXEC
4841 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842 + up_read(&mm->mmap_sem);
4843 + switch (pax_handle_fetch_fault(regs)) {
4844 +
4845 +#ifdef CONFIG_PAX_EMUPLT
4846 + case 2:
4847 + case 3:
4848 + return;
4849 +#endif
4850 +
4851 + }
4852 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853 + do_group_exit(SIGKILL);
4854 + }
4855 +#endif
4856 +
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861 index 504c062..6fcb9c6 100644
4862 --- a/arch/sparc/mm/fault_64.c
4863 +++ b/arch/sparc/mm/fault_64.c
4864 @@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868 +#include <linux/slab.h>
4869 +#include <linux/pagemap.h>
4870 +#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887 +#ifdef CONFIG_PAX_PAGEEXEC
4888 +#ifdef CONFIG_PAX_DLRESOLVE
4889 +static void pax_emuplt_close(struct vm_area_struct *vma)
4890 +{
4891 + vma->vm_mm->call_dl_resolve = 0UL;
4892 +}
4893 +
4894 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895 +{
4896 + unsigned int *kaddr;
4897 +
4898 + vmf->page = alloc_page(GFP_HIGHUSER);
4899 + if (!vmf->page)
4900 + return VM_FAULT_OOM;
4901 +
4902 + kaddr = kmap(vmf->page);
4903 + memset(kaddr, 0, PAGE_SIZE);
4904 + kaddr[0] = 0x9DE3BFA8U; /* save */
4905 + flush_dcache_page(vmf->page);
4906 + kunmap(vmf->page);
4907 + return VM_FAULT_MAJOR;
4908 +}
4909 +
4910 +static const struct vm_operations_struct pax_vm_ops = {
4911 + .close = pax_emuplt_close,
4912 + .fault = pax_emuplt_fault
4913 +};
4914 +
4915 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916 +{
4917 + int ret;
4918 +
4919 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4920 + vma->vm_mm = current->mm;
4921 + vma->vm_start = addr;
4922 + vma->vm_end = addr + PAGE_SIZE;
4923 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925 + vma->vm_ops = &pax_vm_ops;
4926 +
4927 + ret = insert_vm_struct(current->mm, vma);
4928 + if (ret)
4929 + return ret;
4930 +
4931 + ++current->mm->total_vm;
4932 + return 0;
4933 +}
4934 +#endif
4935 +
4936 +/*
4937 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4938 + *
4939 + * returns 1 when task should be killed
4940 + * 2 when patched PLT trampoline was detected
4941 + * 3 when unpatched PLT trampoline was detected
4942 + */
4943 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4944 +{
4945 +
4946 +#ifdef CONFIG_PAX_EMUPLT
4947 + int err;
4948 +
4949 + do { /* PaX: patched PLT emulation #1 */
4950 + unsigned int sethi1, sethi2, jmpl;
4951 +
4952 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4953 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955 +
4956 + if (err)
4957 + break;
4958 +
4959 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962 + {
4963 + unsigned long addr;
4964 +
4965 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966 + addr = regs->u_regs[UREG_G1];
4967 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968 +
4969 + if (test_thread_flag(TIF_32BIT))
4970 + addr &= 0xFFFFFFFFUL;
4971 +
4972 + regs->tpc = addr;
4973 + regs->tnpc = addr+4;
4974 + return 2;
4975 + }
4976 + } while (0);
4977 +
4978 + { /* PaX: patched PLT emulation #2 */
4979 + unsigned int ba;
4980 +
4981 + err = get_user(ba, (unsigned int *)regs->tpc);
4982 +
4983 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984 + unsigned long addr;
4985 +
4986 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987 +
4988 + if (test_thread_flag(TIF_32BIT))
4989 + addr &= 0xFFFFFFFFUL;
4990 +
4991 + regs->tpc = addr;
4992 + regs->tnpc = addr+4;
4993 + return 2;
4994 + }
4995 + }
4996 +
4997 + do { /* PaX: patched PLT emulation #3 */
4998 + unsigned int sethi, jmpl, nop;
4999 +
5000 + err = get_user(sethi, (unsigned int *)regs->tpc);
5001 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003 +
5004 + if (err)
5005 + break;
5006 +
5007 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009 + nop == 0x01000000U)
5010 + {
5011 + unsigned long addr;
5012 +
5013 + addr = (sethi & 0x003FFFFFU) << 10;
5014 + regs->u_regs[UREG_G1] = addr;
5015 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016 +
5017 + if (test_thread_flag(TIF_32BIT))
5018 + addr &= 0xFFFFFFFFUL;
5019 +
5020 + regs->tpc = addr;
5021 + regs->tnpc = addr+4;
5022 + return 2;
5023 + }
5024 + } while (0);
5025 +
5026 + do { /* PaX: patched PLT emulation #4 */
5027 + unsigned int sethi, mov1, call, mov2;
5028 +
5029 + err = get_user(sethi, (unsigned int *)regs->tpc);
5030 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033 +
5034 + if (err)
5035 + break;
5036 +
5037 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038 + mov1 == 0x8210000FU &&
5039 + (call & 0xC0000000U) == 0x40000000U &&
5040 + mov2 == 0x9E100001U)
5041 + {
5042 + unsigned long addr;
5043 +
5044 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046 +
5047 + if (test_thread_flag(TIF_32BIT))
5048 + addr &= 0xFFFFFFFFUL;
5049 +
5050 + regs->tpc = addr;
5051 + regs->tnpc = addr+4;
5052 + return 2;
5053 + }
5054 + } while (0);
5055 +
5056 + do { /* PaX: patched PLT emulation #5 */
5057 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058 +
5059 + err = get_user(sethi, (unsigned int *)regs->tpc);
5060 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067 +
5068 + if (err)
5069 + break;
5070 +
5071 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5075 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076 + sllx == 0x83287020U &&
5077 + jmpl == 0x81C04005U &&
5078 + nop == 0x01000000U)
5079 + {
5080 + unsigned long addr;
5081 +
5082 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083 + regs->u_regs[UREG_G1] <<= 32;
5084 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086 + regs->tpc = addr;
5087 + regs->tnpc = addr+4;
5088 + return 2;
5089 + }
5090 + } while (0);
5091 +
5092 + do { /* PaX: patched PLT emulation #6 */
5093 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094 +
5095 + err = get_user(sethi, (unsigned int *)regs->tpc);
5096 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102 +
5103 + if (err)
5104 + break;
5105 +
5106 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109 + sllx == 0x83287020U &&
5110 + (or & 0xFFFFE000U) == 0x8A116000U &&
5111 + jmpl == 0x81C04005U &&
5112 + nop == 0x01000000U)
5113 + {
5114 + unsigned long addr;
5115 +
5116 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117 + regs->u_regs[UREG_G1] <<= 32;
5118 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120 + regs->tpc = addr;
5121 + regs->tnpc = addr+4;
5122 + return 2;
5123 + }
5124 + } while (0);
5125 +
5126 + do { /* PaX: unpatched PLT emulation step 1 */
5127 + unsigned int sethi, ba, nop;
5128 +
5129 + err = get_user(sethi, (unsigned int *)regs->tpc);
5130 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132 +
5133 + if (err)
5134 + break;
5135 +
5136 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138 + nop == 0x01000000U)
5139 + {
5140 + unsigned long addr;
5141 + unsigned int save, call;
5142 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143 +
5144 + if ((ba & 0xFFC00000U) == 0x30800000U)
5145 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146 + else
5147 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148 +
5149 + if (test_thread_flag(TIF_32BIT))
5150 + addr &= 0xFFFFFFFFUL;
5151 +
5152 + err = get_user(save, (unsigned int *)addr);
5153 + err |= get_user(call, (unsigned int *)(addr+4));
5154 + err |= get_user(nop, (unsigned int *)(addr+8));
5155 + if (err)
5156 + break;
5157 +
5158 +#ifdef CONFIG_PAX_DLRESOLVE
5159 + if (save == 0x9DE3BFA8U &&
5160 + (call & 0xC0000000U) == 0x40000000U &&
5161 + nop == 0x01000000U)
5162 + {
5163 + struct vm_area_struct *vma;
5164 + unsigned long call_dl_resolve;
5165 +
5166 + down_read(&current->mm->mmap_sem);
5167 + call_dl_resolve = current->mm->call_dl_resolve;
5168 + up_read(&current->mm->mmap_sem);
5169 + if (likely(call_dl_resolve))
5170 + goto emulate;
5171 +
5172 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173 +
5174 + down_write(&current->mm->mmap_sem);
5175 + if (current->mm->call_dl_resolve) {
5176 + call_dl_resolve = current->mm->call_dl_resolve;
5177 + up_write(&current->mm->mmap_sem);
5178 + if (vma)
5179 + kmem_cache_free(vm_area_cachep, vma);
5180 + goto emulate;
5181 + }
5182 +
5183 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185 + up_write(&current->mm->mmap_sem);
5186 + if (vma)
5187 + kmem_cache_free(vm_area_cachep, vma);
5188 + return 1;
5189 + }
5190 +
5191 + if (pax_insert_vma(vma, call_dl_resolve)) {
5192 + up_write(&current->mm->mmap_sem);
5193 + kmem_cache_free(vm_area_cachep, vma);
5194 + return 1;
5195 + }
5196 +
5197 + current->mm->call_dl_resolve = call_dl_resolve;
5198 + up_write(&current->mm->mmap_sem);
5199 +
5200 +emulate:
5201 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202 + regs->tpc = call_dl_resolve;
5203 + regs->tnpc = addr+4;
5204 + return 3;
5205 + }
5206 +#endif
5207 +
5208 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209 + if ((save & 0xFFC00000U) == 0x05000000U &&
5210 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5211 + nop == 0x01000000U)
5212 + {
5213 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214 + regs->u_regs[UREG_G2] = addr + 4;
5215 + addr = (save & 0x003FFFFFU) << 10;
5216 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217 +
5218 + if (test_thread_flag(TIF_32BIT))
5219 + addr &= 0xFFFFFFFFUL;
5220 +
5221 + regs->tpc = addr;
5222 + regs->tnpc = addr+4;
5223 + return 3;
5224 + }
5225 +
5226 + /* PaX: 64-bit PLT stub */
5227 + err = get_user(sethi1, (unsigned int *)addr);
5228 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5229 + err |= get_user(or1, (unsigned int *)(addr+8));
5230 + err |= get_user(or2, (unsigned int *)(addr+12));
5231 + err |= get_user(sllx, (unsigned int *)(addr+16));
5232 + err |= get_user(add, (unsigned int *)(addr+20));
5233 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5234 + err |= get_user(nop, (unsigned int *)(addr+28));
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5241 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242 + sllx == 0x89293020U &&
5243 + add == 0x8A010005U &&
5244 + jmpl == 0x89C14000U &&
5245 + nop == 0x01000000U)
5246 + {
5247 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249 + regs->u_regs[UREG_G4] <<= 32;
5250 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252 + regs->u_regs[UREG_G4] = addr + 24;
5253 + addr = regs->u_regs[UREG_G5];
5254 + regs->tpc = addr;
5255 + regs->tnpc = addr+4;
5256 + return 3;
5257 + }
5258 + }
5259 + } while (0);
5260 +
5261 +#ifdef CONFIG_PAX_DLRESOLVE
5262 + do { /* PaX: unpatched PLT emulation step 2 */
5263 + unsigned int save, call, nop;
5264 +
5265 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5266 + err |= get_user(call, (unsigned int *)regs->tpc);
5267 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268 + if (err)
5269 + break;
5270 +
5271 + if (save == 0x9DE3BFA8U &&
5272 + (call & 0xC0000000U) == 0x40000000U &&
5273 + nop == 0x01000000U)
5274 + {
5275 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276 +
5277 + if (test_thread_flag(TIF_32BIT))
5278 + dl_resolve &= 0xFFFFFFFFUL;
5279 +
5280 + regs->u_regs[UREG_RETPC] = regs->tpc;
5281 + regs->tpc = dl_resolve;
5282 + regs->tnpc = dl_resolve+4;
5283 + return 3;
5284 + }
5285 + } while (0);
5286 +#endif
5287 +
5288 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289 + unsigned int sethi, ba, nop;
5290 +
5291 + err = get_user(sethi, (unsigned int *)regs->tpc);
5292 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294 +
5295 + if (err)
5296 + break;
5297 +
5298 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299 + (ba & 0xFFF00000U) == 0x30600000U &&
5300 + nop == 0x01000000U)
5301 + {
5302 + unsigned long addr;
5303 +
5304 + addr = (sethi & 0x003FFFFFU) << 10;
5305 + regs->u_regs[UREG_G1] = addr;
5306 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307 +
5308 + if (test_thread_flag(TIF_32BIT))
5309 + addr &= 0xFFFFFFFFUL;
5310 +
5311 + regs->tpc = addr;
5312 + regs->tnpc = addr+4;
5313 + return 2;
5314 + }
5315 + } while (0);
5316 +
5317 +#endif
5318 +
5319 + return 1;
5320 +}
5321 +
5322 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323 +{
5324 + unsigned long i;
5325 +
5326 + printk(KERN_ERR "PAX: bytes at PC: ");
5327 + for (i = 0; i < 8; i++) {
5328 + unsigned int c;
5329 + if (get_user(c, (unsigned int *)pc+i))
5330 + printk(KERN_CONT "???????? ");
5331 + else
5332 + printk(KERN_CONT "%08x ", c);
5333 + }
5334 + printk("\n");
5335 +}
5336 +#endif
5337 +
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345 +#ifdef CONFIG_PAX_PAGEEXEC
5346 + /* PaX: detect ITLB misses on non-exec pages */
5347 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349 + {
5350 + if (address != regs->tpc)
5351 + goto good_area;
5352 +
5353 + up_read(&mm->mmap_sem);
5354 + switch (pax_handle_fetch_fault(regs)) {
5355 +
5356 +#ifdef CONFIG_PAX_EMUPLT
5357 + case 2:
5358 + case 3:
5359 + return;
5360 +#endif
5361 +
5362 + }
5363 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364 + do_group_exit(SIGKILL);
5365 + }
5366 +#endif
5367 +
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372 index 07e1453..0a7d9e9 100644
5373 --- a/arch/sparc/mm/hugetlbpage.c
5374 +++ b/arch/sparc/mm/hugetlbpage.c
5375 @@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379 - if (likely(!vma || addr + len <= vma->vm_start)) {
5380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388 - if (!vma || addr <= vma->vm_start) {
5389 + if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5398 + addr = mm->mmap_base - len;
5399
5400 do {
5401 + addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408 - if (likely(!vma || addr+len <= vma->vm_start)) {
5409 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417 - addr = (vma->vm_start-len) & HPAGE_MASK;
5418 - } while (likely(len < vma->vm_start));
5419 + addr = skip_heap_stack_gap(vma, len);
5420 + } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428 - if (task_size - len >= addr &&
5429 - (!vma || addr + len <= vma->vm_start))
5430 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435 index 7b00de6..78239f4 100644
5436 --- a/arch/sparc/mm/init_32.c
5437 +++ b/arch/sparc/mm/init_32.c
5438 @@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444 +
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448 @@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452 - protection_map[1] = PAGE_READONLY;
5453 - protection_map[2] = PAGE_COPY;
5454 - protection_map[3] = PAGE_COPY;
5455 + protection_map[1] = PAGE_READONLY_NOEXEC;
5456 + protection_map[2] = PAGE_COPY_NOEXEC;
5457 + protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463 - protection_map[9] = PAGE_READONLY;
5464 - protection_map[10] = PAGE_SHARED;
5465 - protection_map[11] = PAGE_SHARED;
5466 + protection_map[9] = PAGE_READONLY_NOEXEC;
5467 + protection_map[10] = PAGE_SHARED_NOEXEC;
5468 + protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473 index cbef74e..c38fead 100644
5474 --- a/arch/sparc/mm/srmmu.c
5475 +++ b/arch/sparc/mm/srmmu.c
5476 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480 +
5481 +#ifdef CONFIG_PAX_PAGEEXEC
5482 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485 +#endif
5486 +
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490 diff --git a/arch/um/Makefile b/arch/um/Makefile
5491 index 7730af6..cce5b19 100644
5492 --- a/arch/um/Makefile
5493 +++ b/arch/um/Makefile
5494 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498 +ifdef CONSTIFY_PLUGIN
5499 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500 +endif
5501 +
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506 index 6c03acd..a5e0215 100644
5507 --- a/arch/um/include/asm/kmap_types.h
5508 +++ b/arch/um/include/asm/kmap_types.h
5509 @@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513 + KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518 index 7cfc3ce..cbd1a58 100644
5519 --- a/arch/um/include/asm/page.h
5520 +++ b/arch/um/include/asm/page.h
5521 @@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525 +#define ktla_ktva(addr) (addr)
5526 +#define ktva_ktla(addr) (addr)
5527 +
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532 index c533835..84db18e 100644
5533 --- a/arch/um/kernel/process.c
5534 +++ b/arch/um/kernel/process.c
5535 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539 -/*
5540 - * Only x86 and x86_64 have an arch_align_stack().
5541 - * All other arches have "#define arch_align_stack(x) (x)"
5542 - * in their asm/system.h
5543 - * As this is included in UML from asm-um/system-generic.h,
5544 - * we can use it to behave as the subarch does.
5545 - */
5546 -#ifndef arch_align_stack
5547 -unsigned long arch_align_stack(unsigned long sp)
5548 -{
5549 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550 - sp -= get_random_int() % 8192;
5551 - return sp & ~0xf;
5552 -}
5553 -#endif
5554 -
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559 index efb4294..61bc18c 100644
5560 --- a/arch/x86/Kconfig
5561 +++ b/arch/x86/Kconfig
5562 @@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566 - depends on X86_32 && !CC_STACKPROTECTOR
5567 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571 @@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575 - depends on !X86_NUMAQ
5576 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584 - depends on !X86_NUMAQ
5585 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593 - default 0x78000000 if VMSPLIT_2G_OPT
5594 + default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598 @@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602 + depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610 + range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618 + range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626 - def_bool y
5627 + def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635 index e3ca7e0..b30b28a 100644
5636 --- a/arch/x86/Kconfig.cpu
5637 +++ b/arch/x86/Kconfig.cpu
5638 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642 - depends on M586MMX || M586TSC || M586 || M486 || M386
5643 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666 index bf56e17..05f9891 100644
5667 --- a/arch/x86/Kconfig.debug
5668 +++ b/arch/x86/Kconfig.debug
5669 @@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673 - depends on DEBUG_KERNEL
5674 + depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682 - depends on MODULES
5683 + depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688 index b02e509..2631e48 100644
5689 --- a/arch/x86/Makefile
5690 +++ b/arch/x86/Makefile
5691 @@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695 + biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699 @@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703 +
5704 +define OLD_LD
5705 +
5706 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707 +*** Please upgrade your binutils to 2.18 or newer
5708 +endef
5709 +
5710 +archprepare:
5711 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713 index 95365a8..52f857b 100644
5714 --- a/arch/x86/boot/Makefile
5715 +++ b/arch/x86/boot/Makefile
5716 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720 +ifdef CONSTIFY_PLUGIN
5721 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722 +endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727 index 878e4b9..20537ab 100644
5728 --- a/arch/x86/boot/bitops.h
5729 +++ b/arch/x86/boot/bitops.h
5730 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749 index c7093bd..d4247ffe0 100644
5750 --- a/arch/x86/boot/boot.h
5751 +++ b/arch/x86/boot/boot.h
5752 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756 - asm("movw %%ds,%0" : "=rm" (seg));
5757 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765 - asm("repe; cmpsb; setnz %0"
5766 + asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771 index 09664ef..edc5d03 100644
5772 --- a/arch/x86/boot/compressed/Makefile
5773 +++ b/arch/x86/boot/compressed/Makefile
5774 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778 +ifdef CONSTIFY_PLUGIN
5779 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780 +endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785 index 67a655a..b924059 100644
5786 --- a/arch/x86/boot/compressed/head_32.S
5787 +++ b/arch/x86/boot/compressed/head_32.S
5788 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792 - movl $LOAD_PHYSICAL_ADDR, %ebx
5793 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797 @@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801 - subl $LOAD_PHYSICAL_ADDR, %ebx
5802 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806 @@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810 - testl %ecx, %ecx
5811 - jz 2f
5812 + jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817 index 35af09d..99c9676 100644
5818 --- a/arch/x86/boot/compressed/head_64.S
5819 +++ b/arch/x86/boot/compressed/head_64.S
5820 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824 - movl $LOAD_PHYSICAL_ADDR, %ebx
5825 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833 - movq $LOAD_PHYSICAL_ADDR, %rbp
5834 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839 index 3a19d04..7c1d55a 100644
5840 --- a/arch/x86/boot/compressed/misc.c
5841 +++ b/arch/x86/boot/compressed/misc.c
5842 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861 index 89bbf4e..869908e 100644
5862 --- a/arch/x86/boot/compressed/relocs.c
5863 +++ b/arch/x86/boot/compressed/relocs.c
5864 @@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868 +#include "../../../../include/generated/autoconf.h"
5869 +
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872 +static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880 +static void read_phdrs(FILE *fp)
5881 +{
5882 + unsigned int i;
5883 +
5884 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885 + if (!phdr) {
5886 + die("Unable to allocate %d program headers\n",
5887 + ehdr.e_phnum);
5888 + }
5889 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890 + die("Seek to %d failed: %s\n",
5891 + ehdr.e_phoff, strerror(errno));
5892 + }
5893 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894 + die("Cannot read ELF program headers: %s\n",
5895 + strerror(errno));
5896 + }
5897 + for(i = 0; i < ehdr.e_phnum; i++) {
5898 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906 + }
5907 +
5908 +}
5909 +
5910 static void read_shdrs(FILE *fp)
5911 {
5912 - int i;
5913 + unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921 - int i;
5922 + unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930 - int i,j;
5931 + unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939 - int i,j;
5940 + unsigned int i,j;
5941 + uint32_t base;
5942 +
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950 + base = 0;
5951 + for (j = 0; j < ehdr.e_phnum; j++) {
5952 + if (phdr[j].p_type != PT_LOAD )
5953 + continue;
5954 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955 + continue;
5956 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957 + break;
5958 + }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5962 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970 - int i;
5971 + unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978 - int j;
5979 + unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987 - int i, printed = 0;
5988 + unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995 - int j;
5996 + unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004 - int i;
6005 + unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011 - int j;
6012 + unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022 + continue;
6023 +
6024 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027 + continue;
6028 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029 + continue;
6030 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031 + continue;
6032 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033 + continue;
6034 +#endif
6035 +
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043 - int i;
6044 + unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052 + read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057 index 4d3ff03..e4972ff 100644
6058 --- a/arch/x86/boot/cpucheck.c
6059 +++ b/arch/x86/boot/cpucheck.c
6060 @@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064 - asm("movl %%cr0,%0" : "=r" (cr0));
6065 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073 - asm("pushfl ; "
6074 + asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078 @@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082 - asm("cpuid"
6083 + asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087 @@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091 - asm("cpuid"
6092 + asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096 @@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100 - asm("cpuid"
6101 + asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105 @@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109 - asm("cpuid"
6110 + asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144 - asm("cpuid"
6145 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147 + asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156 index bdb4d45..0476680 100644
6157 --- a/arch/x86/boot/header.S
6158 +++ b/arch/x86/boot/header.S
6159 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169 index db75d07..8e6d0af 100644
6170 --- a/arch/x86/boot/memory.c
6171 +++ b/arch/x86/boot/memory.c
6172 @@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176 - int count = 0;
6177 + unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182 index 11e8c6e..fdbb1ed 100644
6183 --- a/arch/x86/boot/video-vesa.c
6184 +++ b/arch/x86/boot/video-vesa.c
6185 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189 + boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194 index 43eda28..5ab5fdb 100644
6195 --- a/arch/x86/boot/video.c
6196 +++ b/arch/x86/boot/video.c
6197 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201 - int i, len = 0;
6202 + unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207 index 5b577d5..3c1fed4 100644
6208 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210 @@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214 +#include <asm/alternative-asm.h>
6215 +
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223 +#define ret pax_force_retaddr 0, 1; ret
6224 +
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229 index be6d9e3..21fbbca 100644
6230 --- a/arch/x86/crypto/aesni-intel_asm.S
6231 +++ b/arch/x86/crypto/aesni-intel_asm.S
6232 @@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236 +#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244 + pax_force_retaddr 0, 1
6245 ret
6246 +ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254 + pax_force_retaddr 0, 1
6255 ret
6256 +ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264 + pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272 + pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280 + pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288 + pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296 + pax_force_retaddr 0, 1
6297 ret
6298 +ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306 + pax_force_retaddr 0, 1
6307 ret
6308 +ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316 + pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324 + pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332 + pax_force_retaddr 0, 1
6333 ret
6334 +ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342 + pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350 + pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358 + pax_force_retaddr 0, 1
6359 ret
6360 +ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368 + pax_force_retaddr 0, 1
6369 ret
6370 +ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378 + pax_force_retaddr 0, 1
6379 ret
6380 +ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388 + pax_force_retaddr 0, 1
6389 ret
6390 +ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398 + pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402 @@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406 + pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414 + pax_force_retaddr 0, 1
6415 ret
6416 +ENDPROC(aesni_ctr_enc)
6417 #endif
6418 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419 index 391d245..67f35c2 100644
6420 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422 @@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426 +#include <asm/alternative-asm.h>
6427 +
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435 + pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439 + pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447 + pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455 + pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459 @@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463 + pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471 + pax_force_retaddr 0, 1
6472 ret;
6473
6474 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475 index 6214a9b..1f4fc9a 100644
6476 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478 @@ -1,3 +1,5 @@
6479 +#include <asm/alternative-asm.h>
6480 +
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488 + pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496 + pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504 + pax_force_retaddr
6505 ret
6506 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507 index b2c2f57..8470cab 100644
6508 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6509 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510 @@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514 +#include <asm/alternative-asm.h>
6515 +
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519 @@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523 + pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528 index 5b012a2..36d5364 100644
6529 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531 @@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535 +#include <asm/alternative-asm.h>
6536 +
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544 + pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548 @@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552 + pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560 + pax_force_retaddr 0, 1
6561 ret;
6562
6563 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564 index 7bcf3fc..f53832f 100644
6565 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567 @@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571 +#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575 @@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579 + pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583 @@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587 + pax_force_retaddr 0, 1
6588 ret
6589 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590 index fd84387..0b4af7d 100644
6591 --- a/arch/x86/ia32/ia32_aout.c
6592 +++ b/arch/x86/ia32/ia32_aout.c
6593 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597 + memset(&dump, 0, sizeof(dump));
6598 +
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603 index 6557769..ef6ae89 100644
6604 --- a/arch/x86/ia32/ia32_signal.c
6605 +++ b/arch/x86/ia32/ia32_signal.c
6606 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619 - void **fpstate)
6620 + void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628 - *fpstate = (struct _fpstate_ia32 *) sp;
6629 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637 - sp = ((sp + 4) & -16ul) - 4;
6638 + sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655 - 0,
6656 + 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664 + else if (current->mm->context.vdso)
6665 + /* Return stub is in 32bit vsyscall page */
6666 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669 - rt_sigreturn);
6670 + restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683 index a6253ec..4ad2120 100644
6684 --- a/arch/x86/ia32/ia32entry.S
6685 +++ b/arch/x86/ia32/ia32entry.S
6686 @@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690 +#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692 +#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700 + .macro pax_enter_kernel_user
6701 + pax_set_fptr_mask
6702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6703 + call pax_enter_kernel_user
6704 +#endif
6705 + .endm
6706 +
6707 + .macro pax_exit_kernel_user
6708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6709 + call pax_exit_kernel_user
6710 +#endif
6711 +#ifdef CONFIG_PAX_RANDKSTACK
6712 + pushq %rax
6713 + pushq %r11
6714 + call pax_randomize_kstack
6715 + popq %r11
6716 + popq %rax
6717 +#endif
6718 + .endm
6719 +
6720 +.macro pax_erase_kstack
6721 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722 + call pax_erase_kstack
6723 +#endif
6724 +.endm
6725 +
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733 - addq $(KERNEL_STACK_OFFSET),%rsp
6734 - /*
6735 - * No need to follow this irqs on/off section: the syscall
6736 - * disabled irqs, here we enable it straight after entry:
6737 - */
6738 - ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747 - CFI_REGISTER rip,r10
6748 + orl $X86_EFLAGS_IF,(%rsp)
6749 + GET_THREAD_INFO(%r11)
6750 + movl TI_sysenter_return(%r11), %r11d
6751 + CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755 - pushq_cfi %r10
6756 + pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761 + pax_enter_kernel_user
6762 + /*
6763 + * No need to follow this irqs on/off section: the syscall
6764 + * disabled irqs, here we enable it straight after entry:
6765 + */
6766 + ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769 +
6770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6771 + mov $PAX_USER_SHADOW_BASE,%r11
6772 + add %r11,%rbp
6773 +#endif
6774 +
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779 - GET_THREAD_INFO(%r10)
6780 - orl $TS_COMPAT,TI_status(%r10)
6781 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782 + GET_THREAD_INFO(%r11)
6783 + orl $TS_COMPAT,TI_status(%r11)
6784 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788 @@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792 - GET_THREAD_INFO(%r10)
6793 + GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800 - andl $~TS_COMPAT,TI_status(%r10)
6801 + pax_exit_kernel_user
6802 + pax_erase_kstack
6803 + andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811 +
6812 + pax_erase_kstack
6813 +
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830 - GET_THREAD_INFO(%r10)
6831 + GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836 - testl %edi,TI_flags(%r10)
6837 + testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841 @@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850 @@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854 +
6855 + pax_erase_kstack
6856 +
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865 + CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872 + SAVE_ARGS 8*6,0,0
6873 + pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879 - SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887 +
6888 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6889 + mov $PAX_USER_SHADOW_BASE,%r11
6890 + add %r11,%r8
6891 +#endif
6892 +
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897 - GET_THREAD_INFO(%r10)
6898 - orl $TS_COMPAT,TI_status(%r10)
6899 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900 + GET_THREAD_INFO(%r11)
6901 + orl $TS_COMPAT,TI_status(%r11)
6902 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906 @@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910 - GET_THREAD_INFO(%r10)
6911 + GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918 - andl $~TS_COMPAT,TI_status(%r10)
6919 + pax_exit_kernel_user
6920 + pax_erase_kstack
6921 + andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925 @@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934 @@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938 +
6939 + pax_erase_kstack
6940 +
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948 - /*
6949 - * No need to follow this irqs on/off section: the syscall
6950 - * disabled irqs and here we enable it straight after entry:
6951 - */
6952 - ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959 - GET_THREAD_INFO(%r10)
6960 - orl $TS_COMPAT,TI_status(%r10)
6961 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962 + pax_enter_kernel_user
6963 + /*
6964 + * No need to follow this irqs on/off section: the syscall
6965 + * disabled irqs and here we enable it straight after entry:
6966 + */
6967 + ENABLE_INTERRUPTS(CLBR_NONE)
6968 + GET_THREAD_INFO(%r11)
6969 + orl $TS_COMPAT,TI_status(%r11)
6970 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974 @@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978 +
6979 + pax_erase_kstack
6980 +
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984 @@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988 + pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993 index f6f5c53..b358b28 100644
6994 --- a/arch/x86/ia32/sys_ia32.c
6995 +++ b/arch/x86/ia32/sys_ia32.c
6996 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000 - typeof(ubuf->st_uid) uid = 0;
7001 - typeof(ubuf->st_gid) gid = 0;
7002 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011 - set ? (sigset_t __user *)&s : NULL,
7012 - oset ? (sigset_t __user *)&s : NULL,
7013 + set ? (sigset_t __force_user *)&s : NULL,
7014 + oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064 index 091508b..e245ff2 100644
7065 --- a/arch/x86/include/asm/alternative-asm.h
7066 +++ b/arch/x86/include/asm/alternative-asm.h
7067 @@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071 -1: lock
7072 +672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075 - .long 1b - .
7076 + .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080 @@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085 + .macro pax_force_retaddr_bts rip=0
7086 + btsq $63,\rip(%rsp)
7087 + .endm
7088 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089 + .macro pax_force_retaddr rip=0, reload=0
7090 + btsq $63,\rip(%rsp)
7091 + .endm
7092 + .macro pax_force_fptr ptr
7093 + btsq $63,\ptr
7094 + .endm
7095 + .macro pax_set_fptr_mask
7096 + .endm
7097 +#endif
7098 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099 + .macro pax_force_retaddr rip=0, reload=0
7100 + .if \reload
7101 + pax_set_fptr_mask
7102 + .endif
7103 + orq %r10,\rip(%rsp)
7104 + .endm
7105 + .macro pax_force_fptr ptr
7106 + orq %r10,\ptr
7107 + .endm
7108 + .macro pax_set_fptr_mask
7109 + movabs $0x8000000000000000,%r10
7110 + .endm
7111 +#endif
7112 +#else
7113 + .macro pax_force_retaddr rip=0, reload=0
7114 + .endm
7115 + .macro pax_force_fptr ptr
7116 + .endm
7117 + .macro pax_force_retaddr_bts rip=0
7118 + .endm
7119 + .macro pax_set_fptr_mask
7120 + .endm
7121 +#endif
7122 +
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127 index 37ad100..7d47faa 100644
7128 --- a/arch/x86/include/asm/alternative.h
7129 +++ b/arch/x86/include/asm/alternative.h
7130 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134 - ".section .altinstr_replacement, \"ax\"\n" \
7135 + ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140 index 1a6c09a..fec2432 100644
7141 --- a/arch/x86/include/asm/apic.h
7142 +++ b/arch/x86/include/asm/apic.h
7143 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147 -extern unsigned int apic_verbosity;
7148 +extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153 index 20370c6..a2eb9b0 100644
7154 --- a/arch/x86/include/asm/apm.h
7155 +++ b/arch/x86/include/asm/apm.h
7156 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160 - "lcall *%%cs:apm_bios_entry\n\t"
7161 + "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169 - "lcall *%%cs:apm_bios_entry\n\t"
7170 + "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175 index 58cb6d4..ca9010d 100644
7176 --- a/arch/x86/include/asm/atomic.h
7177 +++ b/arch/x86/include/asm/atomic.h
7178 @@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182 - return (*(volatile int *)&(v)->counter);
7183 + return (*(volatile const int *)&(v)->counter);
7184 +}
7185 +
7186 +/**
7187 + * atomic_read_unchecked - read atomic variable
7188 + * @v: pointer of type atomic_unchecked_t
7189 + *
7190 + * Atomically reads the value of @v.
7191 + */
7192 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193 +{
7194 + return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202 + * atomic_set_unchecked - set atomic variable
7203 + * @v: pointer of type atomic_unchecked_t
7204 + * @i: required value
7205 + *
7206 + * Atomically sets the value of @v to @i.
7207 + */
7208 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209 +{
7210 + v->counter = i;
7211 +}
7212 +
7213 +/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221 - asm volatile(LOCK_PREFIX "addl %1,%0"
7222 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223 +
7224 +#ifdef CONFIG_PAX_REFCOUNT
7225 + "jno 0f\n"
7226 + LOCK_PREFIX "subl %1,%0\n"
7227 + "int $4\n0:\n"
7228 + _ASM_EXTABLE(0b, 0b)
7229 +#endif
7230 +
7231 + : "+m" (v->counter)
7232 + : "ir" (i));
7233 +}
7234 +
7235 +/**
7236 + * atomic_add_unchecked - add integer to atomic variable
7237 + * @i: integer value to add
7238 + * @v: pointer of type atomic_unchecked_t
7239 + *
7240 + * Atomically adds @i to @v.
7241 + */
7242 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243 +{
7244 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252 - asm volatile(LOCK_PREFIX "subl %1,%0"
7253 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254 +
7255 +#ifdef CONFIG_PAX_REFCOUNT
7256 + "jno 0f\n"
7257 + LOCK_PREFIX "addl %1,%0\n"
7258 + "int $4\n0:\n"
7259 + _ASM_EXTABLE(0b, 0b)
7260 +#endif
7261 +
7262 + : "+m" (v->counter)
7263 + : "ir" (i));
7264 +}
7265 +
7266 +/**
7267 + * atomic_sub_unchecked - subtract integer from atomic variable
7268 + * @i: integer value to subtract
7269 + * @v: pointer of type atomic_unchecked_t
7270 + *
7271 + * Atomically subtracts @i from @v.
7272 + */
7273 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274 +{
7275 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285 +
7286 +#ifdef CONFIG_PAX_REFCOUNT
7287 + "jno 0f\n"
7288 + LOCK_PREFIX "addl %2,%0\n"
7289 + "int $4\n0:\n"
7290 + _ASM_EXTABLE(0b, 0b)
7291 +#endif
7292 +
7293 + "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301 - asm volatile(LOCK_PREFIX "incl %0"
7302 + asm volatile(LOCK_PREFIX "incl %0\n"
7303 +
7304 +#ifdef CONFIG_PAX_REFCOUNT
7305 + "jno 0f\n"
7306 + LOCK_PREFIX "decl %0\n"
7307 + "int $4\n0:\n"
7308 + _ASM_EXTABLE(0b, 0b)
7309 +#endif
7310 +
7311 + : "+m" (v->counter));
7312 +}
7313 +
7314 +/**
7315 + * atomic_inc_unchecked - increment atomic variable
7316 + * @v: pointer of type atomic_unchecked_t
7317 + *
7318 + * Atomically increments @v by 1.
7319 + */
7320 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321 +{
7322 + asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330 - asm volatile(LOCK_PREFIX "decl %0"
7331 + asm volatile(LOCK_PREFIX "decl %0\n"
7332 +
7333 +#ifdef CONFIG_PAX_REFCOUNT
7334 + "jno 0f\n"
7335 + LOCK_PREFIX "incl %0\n"
7336 + "int $4\n0:\n"
7337 + _ASM_EXTABLE(0b, 0b)
7338 +#endif
7339 +
7340 + : "+m" (v->counter));
7341 +}
7342 +
7343 +/**
7344 + * atomic_dec_unchecked - decrement atomic variable
7345 + * @v: pointer of type atomic_unchecked_t
7346 + *
7347 + * Atomically decrements @v by 1.
7348 + */
7349 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350 +{
7351 + asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360 + asm volatile(LOCK_PREFIX "decl %0\n"
7361 +
7362 +#ifdef CONFIG_PAX_REFCOUNT
7363 + "jno 0f\n"
7364 + LOCK_PREFIX "incl %0\n"
7365 + "int $4\n0:\n"
7366 + _ASM_EXTABLE(0b, 0b)
7367 +#endif
7368 +
7369 + "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378 + asm volatile(LOCK_PREFIX "incl %0\n"
7379 +
7380 +#ifdef CONFIG_PAX_REFCOUNT
7381 + "jno 0f\n"
7382 + LOCK_PREFIX "decl %0\n"
7383 + "int $4\n0:\n"
7384 + _ASM_EXTABLE(0b, 0b)
7385 +#endif
7386 +
7387 + "sete %1\n"
7388 + : "+m" (v->counter), "=qm" (c)
7389 + : : "memory");
7390 + return c != 0;
7391 +}
7392 +
7393 +/**
7394 + * atomic_inc_and_test_unchecked - increment and test
7395 + * @v: pointer of type atomic_unchecked_t
7396 + *
7397 + * Atomically increments @v by 1
7398 + * and returns true if the result is zero, or false for all
7399 + * other cases.
7400 + */
7401 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402 +{
7403 + unsigned char c;
7404 +
7405 + asm volatile(LOCK_PREFIX "incl %0\n"
7406 + "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416 +
7417 +#ifdef CONFIG_PAX_REFCOUNT
7418 + "jno 0f\n"
7419 + LOCK_PREFIX "subl %2,%0\n"
7420 + "int $4\n0:\n"
7421 + _ASM_EXTABLE(0b, 0b)
7422 +#endif
7423 +
7424 + "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432 - return i + xadd(&v->counter, i);
7433 + return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441 + * atomic_add_return_unchecked - add integer and return
7442 + * @i: integer value to add
7443 + * @v: pointer of type atomic_unchecked_t
7444 + *
7445 + * Atomically adds @i to @v and returns @i + @v
7446 + */
7447 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448 +{
7449 +#ifdef CONFIG_M386
7450 + int __i;
7451 + unsigned long flags;
7452 + if (unlikely(boot_cpu_data.x86 <= 3))
7453 + goto no_xadd;
7454 +#endif
7455 + /* Modern 486+ processor */
7456 + return i + xadd(&v->counter, i);
7457 +
7458 +#ifdef CONFIG_M386
7459 +no_xadd: /* Legacy 386 processor */
7460 + raw_local_irq_save(flags);
7461 + __i = atomic_read_unchecked(v);
7462 + atomic_set_unchecked(v, i + __i);
7463 + raw_local_irq_restore(flags);
7464 + return i + __i;
7465 +#endif
7466 +}
7467 +
7468 +/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477 +{
7478 + return atomic_add_return_unchecked(1, v);
7479 +}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488 +{
7489 + return cmpxchg(&v->counter, old, new);
7490 +}
7491 +
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498 +{
7499 + return xchg(&v->counter, new);
7500 +}
7501 +
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509 - int c, old;
7510 + int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513 - if (unlikely(c == (u)))
7514 + if (unlikely(c == u))
7515 break;
7516 - old = atomic_cmpxchg((v), c, c + (a));
7517 +
7518 + asm volatile("addl %2,%0\n"
7519 +
7520 +#ifdef CONFIG_PAX_REFCOUNT
7521 + "jno 0f\n"
7522 + "subl %2,%0\n"
7523 + "int $4\n0:\n"
7524 + _ASM_EXTABLE(0b, 0b)
7525 +#endif
7526 +
7527 + : "=r" (new)
7528 + : "0" (c), "ir" (a));
7529 +
7530 + old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538 +/**
7539 + * atomic_inc_not_zero_hint - increment if not null
7540 + * @v: pointer of type atomic_t
7541 + * @hint: probable value of the atomic before the increment
7542 + *
7543 + * This version of atomic_inc_not_zero() gives a hint of probable
7544 + * value of the atomic. This helps processor to not read the memory
7545 + * before doing the atomic read/modify/write cycle, lowering
7546 + * number of bus transactions on some arches.
7547 + *
7548 + * Returns: 0 if increment was not done, 1 otherwise.
7549 + */
7550 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552 +{
7553 + int val, c = hint, new;
7554 +
7555 + /* sanity test, should be removed by compiler if hint is a constant */
7556 + if (!hint)
7557 + return __atomic_add_unless(v, 1, 0);
7558 +
7559 + do {
7560 + asm volatile("incl %0\n"
7561 +
7562 +#ifdef CONFIG_PAX_REFCOUNT
7563 + "jno 0f\n"
7564 + "decl %0\n"
7565 + "int $4\n0:\n"
7566 + _ASM_EXTABLE(0b, 0b)
7567 +#endif
7568 +
7569 + : "=r" (new)
7570 + : "0" (c));
7571 +
7572 + val = atomic_cmpxchg(v, c, new);
7573 + if (val == c)
7574 + return 1;
7575 + c = val;
7576 + } while (c);
7577 +
7578 + return 0;
7579 +}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584 index 24098aa..1e37723 100644
7585 --- a/arch/x86/include/asm/atomic64_32.h
7586 +++ b/arch/x86/include/asm/atomic64_32.h
7587 @@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591 +#ifdef CONFIG_PAX_REFCOUNT
7592 +typedef struct {
7593 + u64 __aligned(8) counter;
7594 +} atomic64_unchecked_t;
7595 +#else
7596 +typedef atomic64_t atomic64_unchecked_t;
7597 +#endif
7598 +
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607 + * @p: pointer to type atomic64_unchecked_t
7608 + * @o: expected value
7609 + * @n: new value
7610 + *
7611 + * Atomically sets @v to @n if it was equal to @o and returns
7612 + * the old value.
7613 + */
7614 +
7615 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616 +{
7617 + return cmpxchg64(&v->counter, o, n);
7618 +}
7619 +
7620 +/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628 + * atomic64_set_unchecked - set atomic64 variable
7629 + * @v: pointer to type atomic64_unchecked_t
7630 + * @n: value to assign
7631 + *
7632 + * Atomically sets the value of @v to @n.
7633 + */
7634 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635 +{
7636 + unsigned high = (unsigned)(i >> 32);
7637 + unsigned low = (unsigned)i;
7638 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7639 + : "+b" (low), "+c" (high)
7640 + : "S" (v)
7641 + : "eax", "edx", "memory"
7642 + );
7643 +}
7644 +
7645 +/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653 + * atomic64_read_unchecked - read atomic64 variable
7654 + * @v: pointer to type atomic64_unchecked_t
7655 + *
7656 + * Atomically reads the value of @v and returns it.
7657 + */
7658 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659 +{
7660 + long long r;
7661 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662 + : "=A" (r), "+c" (v)
7663 + : : "memory"
7664 + );
7665 + return r;
7666 + }
7667 +
7668 +/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676 +/**
7677 + * atomic64_add_return_unchecked - add and return
7678 + * @i: integer value to add
7679 + * @v: pointer to type atomic64_unchecked_t
7680 + *
7681 + * Atomically adds @i to @v and returns @i + *@v
7682 + */
7683 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684 +{
7685 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686 + : "+A" (i), "+c" (v)
7687 + : : "memory"
7688 + );
7689 + return i;
7690 +}
7691 +
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700 +{
7701 + long long a;
7702 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703 + : "=A" (a)
7704 + : "S" (v)
7705 + : "memory", "ecx"
7706 + );
7707 + return a;
7708 +}
7709 +
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717 + * atomic64_add_unchecked - add integer to atomic64 variable
7718 + * @i: integer value to add
7719 + * @v: pointer to type atomic64_unchecked_t
7720 + *
7721 + * Atomically adds @i to @v.
7722 + */
7723 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724 +{
7725 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726 + : "+A" (i), "+c" (v)
7727 + : : "memory"
7728 + );
7729 + return i;
7730 +}
7731 +
7732 +/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737 index 0e1cbfc..5623683 100644
7738 --- a/arch/x86/include/asm/atomic64_64.h
7739 +++ b/arch/x86/include/asm/atomic64_64.h
7740 @@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744 - return (*(volatile long *)&(v)->counter);
7745 + return (*(volatile const long *)&(v)->counter);
7746 +}
7747 +
7748 +/**
7749 + * atomic64_read_unchecked - read atomic64 variable
7750 + * @v: pointer of type atomic64_unchecked_t
7751 + *
7752 + * Atomically reads the value of @v.
7753 + * Doesn't imply a read memory barrier.
7754 + */
7755 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756 +{
7757 + return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765 + * atomic64_set_unchecked - set atomic64 variable
7766 + * @v: pointer to type atomic64_unchecked_t
7767 + * @i: required value
7768 + *
7769 + * Atomically sets the value of @v to @i.
7770 + */
7771 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772 +{
7773 + v->counter = i;
7774 +}
7775 +
7776 +/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785 +
7786 +#ifdef CONFIG_PAX_REFCOUNT
7787 + "jno 0f\n"
7788 + LOCK_PREFIX "subq %1,%0\n"
7789 + "int $4\n0:\n"
7790 + _ASM_EXTABLE(0b, 0b)
7791 +#endif
7792 +
7793 + : "=m" (v->counter)
7794 + : "er" (i), "m" (v->counter));
7795 +}
7796 +
7797 +/**
7798 + * atomic64_add_unchecked - add integer to atomic64 variable
7799 + * @i: integer value to add
7800 + * @v: pointer to type atomic64_unchecked_t
7801 + *
7802 + * Atomically adds @i to @v.
7803 + */
7804 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805 +{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813 - asm volatile(LOCK_PREFIX "subq %1,%0"
7814 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815 +
7816 +#ifdef CONFIG_PAX_REFCOUNT
7817 + "jno 0f\n"
7818 + LOCK_PREFIX "addq %1,%0\n"
7819 + "int $4\n0:\n"
7820 + _ASM_EXTABLE(0b, 0b)
7821 +#endif
7822 +
7823 + : "=m" (v->counter)
7824 + : "er" (i), "m" (v->counter));
7825 +}
7826 +
7827 +/**
7828 + * atomic64_sub_unchecked - subtract the atomic64 variable
7829 + * @i: integer value to subtract
7830 + * @v: pointer to type atomic64_unchecked_t
7831 + *
7832 + * Atomically subtracts @i from @v.
7833 + */
7834 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835 +{
7836 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846 +
7847 +#ifdef CONFIG_PAX_REFCOUNT
7848 + "jno 0f\n"
7849 + LOCK_PREFIX "addq %2,%0\n"
7850 + "int $4\n0:\n"
7851 + _ASM_EXTABLE(0b, 0b)
7852 +#endif
7853 +
7854 + "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862 + asm volatile(LOCK_PREFIX "incq %0\n"
7863 +
7864 +#ifdef CONFIG_PAX_REFCOUNT
7865 + "jno 0f\n"
7866 + LOCK_PREFIX "decq %0\n"
7867 + "int $4\n0:\n"
7868 + _ASM_EXTABLE(0b, 0b)
7869 +#endif
7870 +
7871 + : "=m" (v->counter)
7872 + : "m" (v->counter));
7873 +}
7874 +
7875 +/**
7876 + * atomic64_inc_unchecked - increment atomic64 variable
7877 + * @v: pointer to type atomic64_unchecked_t
7878 + *
7879 + * Atomically increments @v by 1.
7880 + */
7881 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882 +{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890 - asm volatile(LOCK_PREFIX "decq %0"
7891 + asm volatile(LOCK_PREFIX "decq %0\n"
7892 +
7893 +#ifdef CONFIG_PAX_REFCOUNT
7894 + "jno 0f\n"
7895 + LOCK_PREFIX "incq %0\n"
7896 + "int $4\n0:\n"
7897 + _ASM_EXTABLE(0b, 0b)
7898 +#endif
7899 +
7900 + : "=m" (v->counter)
7901 + : "m" (v->counter));
7902 +}
7903 +
7904 +/**
7905 + * atomic64_dec_unchecked - decrement atomic64 variable
7906 + * @v: pointer to type atomic64_t
7907 + *
7908 + * Atomically decrements @v by 1.
7909 + */
7910 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911 +{
7912 + asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921 + asm volatile(LOCK_PREFIX "decq %0\n"
7922 +
7923 +#ifdef CONFIG_PAX_REFCOUNT
7924 + "jno 0f\n"
7925 + LOCK_PREFIX "incq %0\n"
7926 + "int $4\n0:\n"
7927 + _ASM_EXTABLE(0b, 0b)
7928 +#endif
7929 +
7930 + "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939 + asm volatile(LOCK_PREFIX "incq %0\n"
7940 +
7941 +#ifdef CONFIG_PAX_REFCOUNT
7942 + "jno 0f\n"
7943 + LOCK_PREFIX "decq %0\n"
7944 + "int $4\n0:\n"
7945 + _ASM_EXTABLE(0b, 0b)
7946 +#endif
7947 +
7948 + "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958 +
7959 +#ifdef CONFIG_PAX_REFCOUNT
7960 + "jno 0f\n"
7961 + LOCK_PREFIX "subq %2,%0\n"
7962 + "int $4\n0:\n"
7963 + _ASM_EXTABLE(0b, 0b)
7964 +#endif
7965 +
7966 + "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974 + return i + xadd_check_overflow(&v->counter, i);
7975 +}
7976 +
7977 +/**
7978 + * atomic64_add_return_unchecked - add and return
7979 + * @i: integer value to add
7980 + * @v: pointer to type atomic64_unchecked_t
7981 + *
7982 + * Atomically adds @i to @v and returns @i + @v
7983 + */
7984 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985 +{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994 +{
7995 + return atomic64_add_return_unchecked(1, v);
7996 +}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005 +{
8006 + return cmpxchg(&v->counter, old, new);
8007 +}
8008 +
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016 - long c, old;
8017 + long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020 - if (unlikely(c == (u)))
8021 + if (unlikely(c == u))
8022 break;
8023 - old = atomic64_cmpxchg((v), c, c + (a));
8024 +
8025 + asm volatile("add %2,%0\n"
8026 +
8027 +#ifdef CONFIG_PAX_REFCOUNT
8028 + "jno 0f\n"
8029 + "sub %2,%0\n"
8030 + "int $4\n0:\n"
8031 + _ASM_EXTABLE(0b, 0b)
8032 +#endif
8033 +
8034 + : "=r" (new)
8035 + : "0" (c), "ir" (a));
8036 +
8037 + old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042 - return c != (u);
8043 + return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048 index 1775d6e..b65017f 100644
8049 --- a/arch/x86/include/asm/bitops.h
8050 +++ b/arch/x86/include/asm/bitops.h
8051 @@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061 index 5e1a2ee..c9f9533 100644
8062 --- a/arch/x86/include/asm/boot.h
8063 +++ b/arch/x86/include/asm/boot.h
8064 @@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073 +#ifndef __ASSEMBLY__
8074 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076 +#endif
8077 +
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082 index 48f99f1..d78ebf9 100644
8083 --- a/arch/x86/include/asm/cache.h
8084 +++ b/arch/x86/include/asm/cache.h
8085 @@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093 +#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102 index 4e12668..501d239 100644
8103 --- a/arch/x86/include/asm/cacheflush.h
8104 +++ b/arch/x86/include/asm/cacheflush.h
8105 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109 - return -1;
8110 + return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115 index 46fc474..b02b0f9 100644
8116 --- a/arch/x86/include/asm/checksum_32.h
8117 +++ b/arch/x86/include/asm/checksum_32.h
8118 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123 + int len, __wsum sum,
8124 + int *src_err_ptr, int *dst_err_ptr);
8125 +
8126 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127 + int len, __wsum sum,
8128 + int *src_err_ptr, int *dst_err_ptr);
8129 +
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137 - return csum_partial_copy_generic((__force void *)src, dst,
8138 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146 - return csum_partial_copy_generic(src, (__force void *)dst,
8147 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152 index 5d3acdf..6447a02 100644
8153 --- a/arch/x86/include/asm/cmpxchg.h
8154 +++ b/arch/x86/include/asm/cmpxchg.h
8155 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159 +extern void __xadd_check_overflow_wrong_size(void)
8160 + __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168 +#define __xadd_check_overflow(ptr, inc, lock) \
8169 + ({ \
8170 + __typeof__ (*(ptr)) __ret = (inc); \
8171 + switch (sizeof(*(ptr))) { \
8172 + case __X86_CASE_L: \
8173 + asm volatile (lock "xaddl %0, %1\n" \
8174 + "jno 0f\n" \
8175 + "mov %0,%1\n" \
8176 + "int $4\n0:\n" \
8177 + _ASM_EXTABLE(0b, 0b) \
8178 + : "+r" (__ret), "+m" (*(ptr)) \
8179 + : : "memory", "cc"); \
8180 + break; \
8181 + case __X86_CASE_Q: \
8182 + asm volatile (lock "xaddq %q0, %1\n" \
8183 + "jno 0f\n" \
8184 + "mov %0,%1\n" \
8185 + "int $4\n0:\n" \
8186 + _ASM_EXTABLE(0b, 0b) \
8187 + : "+r" (__ret), "+m" (*(ptr)) \
8188 + : : "memory", "cc"); \
8189 + break; \
8190 + default: \
8191 + __xadd_check_overflow_wrong_size(); \
8192 + } \
8193 + __ret; \
8194 + })
8195 +
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204 +
8205 #endif /* ASM_X86_CMPXCHG_H */
8206 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207 index f3444f7..051a196 100644
8208 --- a/arch/x86/include/asm/cpufeature.h
8209 +++ b/arch/x86/include/asm/cpufeature.h
8210 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214 - ".section .altinstr_replacement,\"ax\"\n"
8215 + ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220 index 41935fa..3b40db8 100644
8221 --- a/arch/x86/include/asm/desc.h
8222 +++ b/arch/x86/include/asm/desc.h
8223 @@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227 +#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235 + desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243 -extern gate_desc idt_table[];
8244 -
8245 -struct gdt_page {
8246 - struct desc_struct gdt[GDT_ENTRIES];
8247 -} __attribute__((aligned(PAGE_SIZE)));
8248 -
8249 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250 +extern gate_desc idt_table[256];
8251
8252 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255 - return per_cpu(gdt_page, cpu).gdt;
8256 + return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264 - gate->a = (seg << 16) | (base & 0xffff);
8265 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266 + gate->gate.offset_low = base;
8267 + gate->gate.seg = seg;
8268 + gate->gate.reserved = 0;
8269 + gate->gate.type = type;
8270 + gate->gate.s = 0;
8271 + gate->gate.dpl = dpl;
8272 + gate->gate.p = 1;
8273 + gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281 + pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283 + pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288 + pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290 + pax_close_kernel();
8291 }
8292
8293 static inline void
8294 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298 + pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300 + pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308 + pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310 + pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318 + pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321 + pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329 -static inline void _set_gate(int gate, unsigned type, void *addr,
8330 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338 -static inline void set_intr_gate(unsigned int n, void *addr)
8339 +static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8348 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8355 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361 -static inline void set_trap_gate(unsigned int n, void *addr)
8362 +static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388 +#ifdef CONFIG_X86_32
8389 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390 +{
8391 + struct desc_struct d;
8392 +
8393 + if (likely(limit))
8394 + limit = (limit - 1UL) >> PAGE_SHIFT;
8395 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397 +}
8398 +#endif
8399 +
8400 #endif /* _ASM_X86_DESC_H */
8401 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402 index 278441f..b95a174 100644
8403 --- a/arch/x86/include/asm/desc_defs.h
8404 +++ b/arch/x86/include/asm/desc_defs.h
8405 @@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409 + struct {
8410 + u16 offset_low;
8411 + u16 seg;
8412 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413 + unsigned offset_high: 16;
8414 + } gate;
8415 };
8416 } __attribute__((packed));
8417
8418 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419 index 908b969..a1f4eb4 100644
8420 --- a/arch/x86/include/asm/e820.h
8421 +++ b/arch/x86/include/asm/e820.h
8422 @@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426 -#define BIOS_BEGIN 0x000a0000
8427 +#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432 index 5f962df..7289f09 100644
8433 --- a/arch/x86/include/asm/elf.h
8434 +++ b/arch/x86/include/asm/elf.h
8435 @@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439 +#ifdef CONFIG_PAX_SEGMEXEC
8440 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441 +#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443 +#endif
8444 +
8445 +#ifdef CONFIG_PAX_ASLR
8446 +#ifdef CONFIG_X86_32
8447 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448 +
8449 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451 +#else
8452 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453 +
8454 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456 +#endif
8457 +#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461 @@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465 - if (vdso_enabled) \
8466 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467 - (unsigned long)current->mm->context.vdso); \
8468 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472 @@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486 -#define arch_randomize_brk arch_randomize_brk
8487 -
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492 index cc70c1c..d96d011 100644
8493 --- a/arch/x86/include/asm/emergency-restart.h
8494 +++ b/arch/x86/include/asm/emergency-restart.h
8495 @@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499 -extern void machine_emergency_restart(void);
8500 +extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504 index d09bb03..4ea4194 100644
8505 --- a/arch/x86/include/asm/futex.h
8506 +++ b/arch/x86/include/asm/futex.h
8507 @@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511 + typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523 + typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527 @@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531 - "+m" (*uaddr), "=&r" (tem) \
8532 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566 index eb92a6e..b98b2f4 100644
8567 --- a/arch/x86/include/asm/hw_irq.h
8568 +++ b/arch/x86/include/asm/hw_irq.h
8569 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573 -extern atomic_t irq_err_count;
8574 -extern atomic_t irq_mis_count;
8575 +extern atomic_unchecked_t irq_err_count;
8576 +extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581 index c9e09ea..73888df 100644
8582 --- a/arch/x86/include/asm/i387.h
8583 +++ b/arch/x86/include/asm/i387.h
8584 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591 +#endif
8592 +
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603 +#endif
8604 +
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612 - in L1 during context switch. The best choices are unfortunately
8613 - different for UP and SMP */
8614 -#ifdef CONFIG_SMP
8615 -#define safe_address (__per_cpu_offset[0])
8616 -#else
8617 -#define safe_address (kstat_cpu(0).cpustat.user)
8618 -#endif
8619 + in L1 during context switch. */
8620 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628 - __save_init_fpu(me->task);
8629 + __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634 index d8e8eef..99f81ae 100644
8635 --- a/arch/x86/include/asm/io.h
8636 +++ b/arch/x86/include/asm/io.h
8637 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643 +{
8644 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645 +}
8646 +
8647 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648 +{
8649 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650 +}
8651 +
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656 index bba3cf8..06bc8da 100644
8657 --- a/arch/x86/include/asm/irqflags.h
8658 +++ b/arch/x86/include/asm/irqflags.h
8659 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667 +
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672 index 5478825..839e88c 100644
8673 --- a/arch/x86/include/asm/kprobes.h
8674 +++ b/arch/x86/include/asm/kprobes.h
8675 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679 -#define MAX_STACK_SIZE 64
8680 -#define MIN_STACK_SIZE(ADDR) \
8681 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682 - THREAD_SIZE - (unsigned long)(ADDR))) \
8683 - ? (MAX_STACK_SIZE) \
8684 - : (((unsigned long)current_thread_info()) + \
8685 - THREAD_SIZE - (unsigned long)(ADDR)))
8686 +#define MAX_STACK_SIZE 64UL
8687 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692 index b4973f4..7c4d3fc 100644
8693 --- a/arch/x86/include/asm/kvm_host.h
8694 +++ b/arch/x86/include/asm/kvm_host.h
8695 @@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699 - atomic_t invlpg_counter;
8700 + atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708 -};
8709 +} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714 index 9cdae5d..300d20f 100644
8715 --- a/arch/x86/include/asm/local.h
8716 +++ b/arch/x86/include/asm/local.h
8717 @@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721 - asm volatile(_ASM_INC "%0"
8722 + asm volatile(_ASM_INC "%0\n"
8723 +
8724 +#ifdef CONFIG_PAX_REFCOUNT
8725 + "jno 0f\n"
8726 + _ASM_DEC "%0\n"
8727 + "int $4\n0:\n"
8728 + _ASM_EXTABLE(0b, 0b)
8729 +#endif
8730 +
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736 - asm volatile(_ASM_DEC "%0"
8737 + asm volatile(_ASM_DEC "%0\n"
8738 +
8739 +#ifdef CONFIG_PAX_REFCOUNT
8740 + "jno 0f\n"
8741 + _ASM_INC "%0\n"
8742 + "int $4\n0:\n"
8743 + _ASM_EXTABLE(0b, 0b)
8744 +#endif
8745 +
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751 - asm volatile(_ASM_ADD "%1,%0"
8752 + asm volatile(_ASM_ADD "%1,%0\n"
8753 +
8754 +#ifdef CONFIG_PAX_REFCOUNT
8755 + "jno 0f\n"
8756 + _ASM_SUB "%1,%0\n"
8757 + "int $4\n0:\n"
8758 + _ASM_EXTABLE(0b, 0b)
8759 +#endif
8760 +
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767 - asm volatile(_ASM_SUB "%1,%0"
8768 + asm volatile(_ASM_SUB "%1,%0\n"
8769 +
8770 +#ifdef CONFIG_PAX_REFCOUNT
8771 + "jno 0f\n"
8772 + _ASM_ADD "%1,%0\n"
8773 + "int $4\n0:\n"
8774 + _ASM_EXTABLE(0b, 0b)
8775 +#endif
8776 +
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8785 + asm volatile(_ASM_SUB "%2,%0\n"
8786 +
8787 +#ifdef CONFIG_PAX_REFCOUNT
8788 + "jno 0f\n"
8789 + _ASM_ADD "%2,%0\n"
8790 + "int $4\n0:\n"
8791 + _ASM_EXTABLE(0b, 0b)
8792 +#endif
8793 +
8794 + "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802 - asm volatile(_ASM_DEC "%0; sete %1"
8803 + asm volatile(_ASM_DEC "%0\n"
8804 +
8805 +#ifdef CONFIG_PAX_REFCOUNT
8806 + "jno 0f\n"
8807 + _ASM_INC "%0\n"
8808 + "int $4\n0:\n"
8809 + _ASM_EXTABLE(0b, 0b)
8810 +#endif
8811 +
8812 + "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820 - asm volatile(_ASM_INC "%0; sete %1"
8821 + asm volatile(_ASM_INC "%0\n"
8822 +
8823 +#ifdef CONFIG_PAX_REFCOUNT
8824 + "jno 0f\n"
8825 + _ASM_DEC "%0\n"
8826 + "int $4\n0:\n"
8827 + _ASM_EXTABLE(0b, 0b)
8828 +#endif
8829 +
8830 + "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8839 + asm volatile(_ASM_ADD "%2,%0\n"
8840 +
8841 +#ifdef CONFIG_PAX_REFCOUNT
8842 + "jno 0f\n"
8843 + _ASM_SUB "%2,%0\n"
8844 + "int $4\n0:\n"
8845 + _ASM_EXTABLE(0b, 0b)
8846 +#endif
8847 +
8848 + "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856 - asm volatile(_ASM_XADD "%0, %1;"
8857 + asm volatile(_ASM_XADD "%0, %1\n"
8858 +
8859 +#ifdef CONFIG_PAX_REFCOUNT
8860 + "jno 0f\n"
8861 + _ASM_MOV "%0,%1\n"
8862 + "int $4\n0:\n"
8863 + _ASM_EXTABLE(0b, 0b)
8864 +#endif
8865 +
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870 index 593e51d..fa69c9a 100644
8871 --- a/arch/x86/include/asm/mman.h
8872 +++ b/arch/x86/include/asm/mman.h
8873 @@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877 +#ifdef __KERNEL__
8878 +#ifndef __ASSEMBLY__
8879 +#ifdef CONFIG_X86_32
8880 +#define arch_mmap_check i386_mmap_check
8881 +int i386_mmap_check(unsigned long addr, unsigned long len,
8882 + unsigned long flags);
8883 +#endif
8884 +#endif
8885 +#endif
8886 +
8887 #endif /* _ASM_X86_MMAN_H */
8888 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889 index 5f55e69..e20bfb1 100644
8890 --- a/arch/x86/include/asm/mmu.h
8891 +++ b/arch/x86/include/asm/mmu.h
8892 @@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896 - void *ldt;
8897 + struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901 @@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905 - void *vdso;
8906 + unsigned long vdso;
8907 +
8908 +#ifdef CONFIG_X86_32
8909 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910 + unsigned long user_cs_base;
8911 + unsigned long user_cs_limit;
8912 +
8913 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914 + cpumask_t cpu_user_cs_mask;
8915 +#endif
8916 +
8917 +#endif
8918 +#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923 index 6902152..399f3a2 100644
8924 --- a/arch/x86/include/asm/mmu_context.h
8925 +++ b/arch/x86/include/asm/mmu_context.h
8926 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930 +
8931 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932 + unsigned int i;
8933 + pgd_t *pgd;
8934 +
8935 + pax_open_kernel();
8936 + pgd = get_cpu_pgd(smp_processor_id());
8937 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938 + set_pgd_batched(pgd+i, native_make_pgd(0));
8939 + pax_close_kernel();
8940 +#endif
8941 +
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950 + int tlbstate = TLBSTATE_OK;
8951 +#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956 + tlbstate = percpu_read(cpu_tlbstate.state);
8957 +#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964 +#ifdef CONFIG_PAX_PER_CPU_PGD
8965 + pax_open_kernel();
8966 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968 + pax_close_kernel();
8969 + load_cr3(get_cpu_pgd(cpu));
8970 +#else
8971 load_cr3(next->pgd);
8972 +#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980 - }
8981 +
8982 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983 + if (!(__supported_pte_mask & _PAGE_NX)) {
8984 + smp_mb__before_clear_bit();
8985 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986 + smp_mb__after_clear_bit();
8987 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8988 + }
8989 +#endif
8990 +
8991 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993 + prev->context.user_cs_limit != next->context.user_cs_limit))
8994 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996 + else if (unlikely(tlbstate != TLBSTATE_OK))
8997 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998 +#endif
8999 +#endif
9000 +
9001 + }
9002 else {
9003 +
9004 +#ifdef CONFIG_PAX_PER_CPU_PGD
9005 + pax_open_kernel();
9006 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008 + pax_close_kernel();
9009 + load_cr3(get_cpu_pgd(cpu));
9010 +#endif
9011 +
9012 +#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020 +
9021 +#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023 +#endif
9024 +
9025 load_LDT_nolock(&next->context);
9026 +
9027 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028 + if (!(__supported_pte_mask & _PAGE_NX))
9029 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9030 +#endif
9031 +
9032 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033 +#ifdef CONFIG_PAX_PAGEEXEC
9034 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035 +#endif
9036 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037 +#endif
9038 +
9039 }
9040 +#endif
9041 }
9042 -#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047 index 9eae775..c914fea 100644
9048 --- a/arch/x86/include/asm/module.h
9049 +++ b/arch/x86/include/asm/module.h
9050 @@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054 +#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058 @@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062 -#ifdef CONFIG_X86_32
9063 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068 +#else
9069 +#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9073 +#define MODULE_PAX_UDEREF "UDEREF "
9074 +#else
9075 +#define MODULE_PAX_UDEREF ""
9076 +#endif
9077 +
9078 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079 +
9080 #endif /* _ASM_X86_MODULE_H */
9081 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082 index 7639dbf..e08a58c 100644
9083 --- a/arch/x86/include/asm/page_64_types.h
9084 +++ b/arch/x86/include/asm/page_64_types.h
9085 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089 -extern unsigned long phys_base;
9090 +extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095 index a7d2db9..edb023e 100644
9096 --- a/arch/x86/include/asm/paravirt.h
9097 +++ b/arch/x86/include/asm/paravirt.h
9098 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103 +{
9104 + pgdval_t val = native_pgd_val(pgd);
9105 +
9106 + if (sizeof(pgdval_t) > sizeof(long))
9107 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108 + val, (u64)val >> 32);
9109 + else
9110 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111 + val);
9112 +}
9113 +
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121 +#ifdef CONFIG_PAX_KERNEXEC
9122 +static inline unsigned long pax_open_kernel(void)
9123 +{
9124 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125 +}
9126 +
9127 +static inline unsigned long pax_close_kernel(void)
9128 +{
9129 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130 +}
9131 +#else
9132 +static inline unsigned long pax_open_kernel(void) { return 0; }
9133 +static inline unsigned long pax_close_kernel(void) { return 0; }
9134 +#endif
9135 +
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139 @@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143 -#define PARA_INDIRECT(addr) *%cs:addr
9144 +#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152 +
9153 +#define GET_CR0_INTO_RDI \
9154 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155 + mov %rax,%rdi
9156 +
9157 +#define SET_RDI_INTO_CR0 \
9158 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159 +
9160 +#define GET_CR3_INTO_RDI \
9161 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162 + mov %rax,%rdi
9163 +
9164 +#define SET_RDI_INTO_CR3 \
9165 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166 +
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171 index 8e8b9a4..f07d725 100644
9172 --- a/arch/x86/include/asm/paravirt_types.h
9173 +++ b/arch/x86/include/asm/paravirt_types.h
9174 @@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178 -};
9179 +} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186 -};
9187 +} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193 -};
9194 +} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202 -};
9203 +} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211 -};
9212 +} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228 +
9229 +#ifdef CONFIG_PAX_KERNEXEC
9230 + unsigned long (*pax_open_kernel)(void);
9231 + unsigned long (*pax_close_kernel)(void);
9232 +#endif
9233 +
9234 };
9235
9236 struct arch_spinlock;
9237 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241 -};
9242 +} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247 index b4389a4..b7ff22c 100644
9248 --- a/arch/x86/include/asm/pgalloc.h
9249 +++ b/arch/x86/include/asm/pgalloc.h
9250 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255 +}
9256 +
9257 +static inline void pmd_populate_user(struct mm_struct *mm,
9258 + pmd_t *pmd, pte_t *pte)
9259 +{
9260 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265 index 98391db..8f6984e 100644
9266 --- a/arch/x86/include/asm/pgtable-2level.h
9267 +++ b/arch/x86/include/asm/pgtable-2level.h
9268 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272 + pax_open_kernel();
9273 *pmdp = pmd;
9274 + pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279 index effff47..f9e4035 100644
9280 --- a/arch/x86/include/asm/pgtable-3level.h
9281 +++ b/arch/x86/include/asm/pgtable-3level.h
9282 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286 + pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288 + pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293 + pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295 + pax_close_kernel();
9296 }
9297
9298 /*
9299 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300 index 18601c8..3d716d1 100644
9301 --- a/arch/x86/include/asm/pgtable.h
9302 +++ b/arch/x86/include/asm/pgtable.h
9303 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315 +#define pax_open_kernel() native_pax_open_kernel()
9316 +#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321 +
9322 +#ifdef CONFIG_PAX_KERNEXEC
9323 +static inline unsigned long native_pax_open_kernel(void)
9324 +{
9325 + unsigned long cr0;
9326 +
9327 + preempt_disable();
9328 + barrier();
9329 + cr0 = read_cr0() ^ X86_CR0_WP;
9330 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331 + write_cr0(cr0);
9332 + return cr0 ^ X86_CR0_WP;
9333 +}
9334 +
9335 +static inline unsigned long native_pax_close_kernel(void)
9336 +{
9337 + unsigned long cr0;
9338 +
9339 + cr0 = read_cr0() ^ X86_CR0_WP;
9340 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341 + write_cr0(cr0);
9342 + barrier();
9343 + preempt_enable_no_resched();
9344 + return cr0 ^ X86_CR0_WP;
9345 +}
9346 +#else
9347 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349 +#endif
9350 +
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355 +static inline int pte_user(pte_t pte)
9356 +{
9357 + return pte_val(pte) & _PAGE_USER;
9358 +}
9359 +
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367 +static inline pte_t pte_mkread(pte_t pte)
9368 +{
9369 + return __pte(pte_val(pte) | _PAGE_USER);
9370 +}
9371 +
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374 - return pte_clear_flags(pte, _PAGE_NX);
9375 +#ifdef CONFIG_X86_PAE
9376 + if (__supported_pte_mask & _PAGE_NX)
9377 + return pte_clear_flags(pte, _PAGE_NX);
9378 + else
9379 +#endif
9380 + return pte_set_flags(pte, _PAGE_USER);
9381 +}
9382 +
9383 +static inline pte_t pte_exprotect(pte_t pte)
9384 +{
9385 +#ifdef CONFIG_X86_PAE
9386 + if (__supported_pte_mask & _PAGE_NX)
9387 + return pte_set_flags(pte, _PAGE_NX);
9388 + else
9389 +#endif
9390 + return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398 +
9399 +#ifdef CONFIG_PAX_PER_CPU_PGD
9400 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402 +{
9403 + return cpu_pgd[cpu];
9404 +}
9405 +#endif
9406 +
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425 +
9426 +#ifdef CONFIG_PAX_PER_CPU_PGD
9427 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428 +#endif
9429 +
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437 +#ifdef CONFIG_X86_32
9438 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439 +#else
9440 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442 +
9443 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9444 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445 +#else
9446 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447 +#endif
9448 +
9449 +#endif
9450 +
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461 - memcpy(dst, src, count * sizeof(pgd_t));
9462 + pax_open_kernel();
9463 + while (count--)
9464 + *dst++ = *src++;
9465 + pax_close_kernel();
9466 }
9467
9468 +#ifdef CONFIG_PAX_PER_CPU_PGD
9469 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470 +#endif
9471 +
9472 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474 +#else
9475 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476 +#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481 index 0c92113..34a77c6 100644
9482 --- a/arch/x86/include/asm/pgtable_32.h
9483 +++ b/arch/x86/include/asm/pgtable_32.h
9484 @@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488 -extern pgd_t swapper_pg_dir[1024];
9489 -extern pgd_t initial_page_table[1024];
9490 -
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9500 +#ifdef CONFIG_X86_PAE
9501 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502 +#endif
9503 +
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511 + pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513 + pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517 @@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521 +#define HAVE_ARCH_UNMAPPED_AREA
9522 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523 +
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528 index ed5903b..c7fe163 100644
9529 --- a/arch/x86/include/asm/pgtable_32_types.h
9530 +++ b/arch/x86/include/asm/pgtable_32_types.h
9531 @@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535 -# define PMD_SIZE (1UL << PMD_SHIFT)
9536 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544 +#ifdef CONFIG_PAX_KERNEXEC
9545 +#ifndef __ASSEMBLY__
9546 +extern unsigned char MODULES_EXEC_VADDR[];
9547 +extern unsigned char MODULES_EXEC_END[];
9548 +#endif
9549 +#include <asm/boot.h>
9550 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552 +#else
9553 +#define ktla_ktva(addr) (addr)
9554 +#define ktva_ktla(addr) (addr)
9555 +#endif
9556 +
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561 index 975f709..107976d 100644
9562 --- a/arch/x86/include/asm/pgtable_64.h
9563 +++ b/arch/x86/include/asm/pgtable_64.h
9564 @@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568 +extern pud_t level3_vmalloc_start_pgt[512];
9569 +extern pud_t level3_vmalloc_end_pgt[512];
9570 +extern pud_t level3_vmemmap_pgt[512];
9571 +extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574 -extern pmd_t level2_ident_pgt[512];
9575 -extern pgd_t init_level4_pgt[];
9576 +extern pmd_t level2_ident_pgt[512*2];
9577 +extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585 + pax_open_kernel();
9586 *pmdp = pmd;
9587 + pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595 + pax_open_kernel();
9596 + *pgdp = pgd;
9597 + pax_close_kernel();
9598 +}
9599 +
9600 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601 +{
9602 *pgdp = pgd;
9603 }
9604
9605 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606 index 766ea16..5b96cb3 100644
9607 --- a/arch/x86/include/asm/pgtable_64_types.h
9608 +++ b/arch/x86/include/asm/pgtable_64_types.h
9609 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613 +#define MODULES_EXEC_VADDR MODULES_VADDR
9614 +#define MODULES_EXEC_END MODULES_END
9615 +
9616 +#define ktla_ktva(addr) (addr)
9617 +#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621 index 013286a..8b42f4f 100644
9622 --- a/arch/x86/include/asm/pgtable_types.h
9623 +++ b/arch/x86/include/asm/pgtable_types.h
9624 @@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641 @@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649 @@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653 -#else
9654 +#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656 +#else
9657 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661 @@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667 +
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671 @@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680 @@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695 +#endif
9696
9697 +#if PAGETABLE_LEVELS == 3
9698 +#include <asm-generic/pgtable-nopud.h>
9699 +#endif
9700 +
9701 +#if PAGETABLE_LEVELS == 2
9702 +#include <asm-generic/pgtable-nopmd.h>
9703 +#endif
9704 +
9705 +#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713 -#include <asm-generic/pgtable-nopud.h>
9714 -
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722 -#include <asm-generic/pgtable-nopmd.h>
9723 -
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731 -extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736 index b650435..eefa566 100644
9737 --- a/arch/x86/include/asm/processor.h
9738 +++ b/arch/x86/include/asm/processor.h
9739 @@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744 +extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752 +
9753 +#ifdef CONFIG_PAX_SEGMEXEC
9754 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756 +#else
9757 #define STACK_TOP TASK_SIZE
9758 -#define STACK_TOP_MAX STACK_TOP
9759 +#endif
9760 +
9761 +#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782 -#define KSTK_TOP(info) \
9783 -({ \
9784 - unsigned long *__ptr = (unsigned long *)(info); \
9785 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786 -})
9787 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811 - 0xc0000000 : 0xFFFFe000)
9812 + 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834 +#ifdef CONFIG_PAX_SEGMEXEC
9835 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836 +#endif
9837 +
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842 index 3566454..4bdfb8c 100644
9843 --- a/arch/x86/include/asm/ptrace.h
9844 +++ b/arch/x86/include/asm/ptrace.h
9845 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849 - * user_mode_vm(regs) determines whether a register set came from user mode.
9850 + * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856 + * be used.
9857 */
9858 -static inline int user_mode(struct pt_regs *regs)
9859 +static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864 - return !!(regs->cs & 3);
9865 + return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869 -static inline int user_mode_vm(struct pt_regs *regs)
9870 +static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876 - return user_mode(regs);
9877 + return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885 + unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891 - return regs->cs == __USER_CS;
9892 + return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901 index 92f29706..a79cbbb 100644
9902 --- a/arch/x86/include/asm/reboot.h
9903 +++ b/arch/x86/include/asm/reboot.h
9904 @@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908 - void (*restart)(char *cmd);
9909 - void (*halt)(void);
9910 - void (*power_off)(void);
9911 + void (* __noreturn restart)(char *cmd);
9912 + void (* __noreturn halt)(void);
9913 + void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916 - void (*emergency_restart)(void);
9917 -};
9918 + void (* __noreturn emergency_restart)(void);
9919 +} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925 -void machine_real_restart(unsigned int type);
9926 +void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931 index 2dbe4a7..ce1db00 100644
9932 --- a/arch/x86/include/asm/rwsem.h
9933 +++ b/arch/x86/include/asm/rwsem.h
9934 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938 +
9939 +#ifdef CONFIG_PAX_REFCOUNT
9940 + "jno 0f\n"
9941 + LOCK_PREFIX _ASM_DEC "(%1)\n"
9942 + "int $4\n0:\n"
9943 + _ASM_EXTABLE(0b, 0b)
9944 +#endif
9945 +
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953 +
9954 +#ifdef CONFIG_PAX_REFCOUNT
9955 + "jno 0f\n"
9956 + "sub %3,%2\n"
9957 + "int $4\n0:\n"
9958 + _ASM_EXTABLE(0b, 0b)
9959 +#endif
9960 +
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968 +
9969 +#ifdef CONFIG_PAX_REFCOUNT
9970 + "jno 0f\n"
9971 + "mov %1,(%2)\n"
9972 + "int $4\n0:\n"
9973 + _ASM_EXTABLE(0b, 0b)
9974 +#endif
9975 +
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983 +
9984 +#ifdef CONFIG_PAX_REFCOUNT
9985 + "jno 0f\n"
9986 + "mov %1,(%2)\n"
9987 + "int $4\n0:\n"
9988 + _ASM_EXTABLE(0b, 0b)
9989 +#endif
9990 +
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998 +
9999 +#ifdef CONFIG_PAX_REFCOUNT
10000 + "jno 0f\n"
10001 + "mov %1,(%2)\n"
10002 + "int $4\n0:\n"
10003 + _ASM_EXTABLE(0b, 0b)
10004 +#endif
10005 +
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013 +
10014 +#ifdef CONFIG_PAX_REFCOUNT
10015 + "jno 0f\n"
10016 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017 + "int $4\n0:\n"
10018 + _ASM_EXTABLE(0b, 0b)
10019 +#endif
10020 +
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030 +
10031 +#ifdef CONFIG_PAX_REFCOUNT
10032 + "jno 0f\n"
10033 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034 + "int $4\n0:\n"
10035 + _ASM_EXTABLE(0b, 0b)
10036 +#endif
10037 +
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045 - return delta + xadd(&sem->count, delta);
10046 + return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051 index 5e64171..f58957e 100644
10052 --- a/arch/x86/include/asm/segment.h
10053 +++ b/arch/x86/include/asm/segment.h
10054 @@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058 - * 29 - unused
10059 - * 30 - unused
10060 + * 29 - PCI BIOS CS
10061 + * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068 +
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072 @@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077 +
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081 @@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087 +
10088 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090 +
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094 @@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103 @@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108 +
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112 @@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121 index 73b11bc..d4a3b63 100644
10122 --- a/arch/x86/include/asm/smp.h
10123 +++ b/arch/x86/include/asm/smp.h
10124 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128 -DECLARE_PER_CPU(int, cpu_number);
10129 +DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133 @@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137 -};
10138 +} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10147 -
10148 -#define stack_smp_processor_id() \
10149 -({ \
10150 - struct thread_info *ti; \
10151 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152 - ti->cpu; \
10153 -})
10154 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10155 +#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160 index 972c260..43ab1fd 100644
10161 --- a/arch/x86/include/asm/spinlock.h
10162 +++ b/arch/x86/include/asm/spinlock.h
10163 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167 +
10168 +#ifdef CONFIG_PAX_REFCOUNT
10169 + "jno 0f\n"
10170 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171 + "int $4\n0:\n"
10172 + _ASM_EXTABLE(0b, 0b)
10173 +#endif
10174 +
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182 +
10183 +#ifdef CONFIG_PAX_REFCOUNT
10184 + "jno 0f\n"
10185 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186 + "int $4\n0:\n"
10187 + _ASM_EXTABLE(0b, 0b)
10188 +#endif
10189 +
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199 +
10200 +#ifdef CONFIG_PAX_REFCOUNT
10201 + "jno 0f\n"
10202 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203 + "int $4\n0:\n"
10204 + _ASM_EXTABLE(0b, 0b)
10205 +#endif
10206 +
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214 +
10215 +#ifdef CONFIG_PAX_REFCOUNT
10216 + "jno 0f\n"
10217 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218 + "int $4\n0:\n"
10219 + _ASM_EXTABLE(0b, 0b)
10220 +#endif
10221 +
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226 index 1575177..cb23f52 100644
10227 --- a/arch/x86/include/asm/stackprotector.h
10228 +++ b/arch/x86/include/asm/stackprotector.h
10229 @@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242 -#ifdef CONFIG_X86_32
10243 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248 index 70bbe39..4ae2bd4 100644
10249 --- a/arch/x86/include/asm/stacktrace.h
10250 +++ b/arch/x86/include/asm/stacktrace.h
10251 @@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255 -struct thread_info;
10256 +struct task_struct;
10257 struct stacktrace_ops;
10258
10259 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260 - unsigned long *stack,
10261 - unsigned long bp,
10262 - const struct stacktrace_ops *ops,
10263 - void *data,
10264 - unsigned long *end,
10265 - int *graph);
10266 +typedef unsigned long walk_stack_t(struct task_struct *task,
10267 + void *stack_start,
10268 + unsigned long *stack,
10269 + unsigned long bp,
10270 + const struct stacktrace_ops *ops,
10271 + void *data,
10272 + unsigned long *end,
10273 + int *graph);
10274
10275 -extern unsigned long
10276 -print_context_stack(struct thread_info *tinfo,
10277 - unsigned long *stack, unsigned long bp,
10278 - const struct stacktrace_ops *ops, void *data,
10279 - unsigned long *end, int *graph);
10280 -
10281 -extern unsigned long
10282 -print_context_stack_bp(struct thread_info *tinfo,
10283 - unsigned long *stack, unsigned long bp,
10284 - const struct stacktrace_ops *ops, void *data,
10285 - unsigned long *end, int *graph);
10286 +extern walk_stack_t print_context_stack;
10287 +extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295 - walk_stack_t walk_stack;
10296 + walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301 index cb23852..2dde194 100644
10302 --- a/arch/x86/include/asm/sys_ia32.h
10303 +++ b/arch/x86/include/asm/sys_ia32.h
10304 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314 index 2d2f01c..f985723 100644
10315 --- a/arch/x86/include/asm/system.h
10316 +++ b/arch/x86/include/asm/system.h
10317 @@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326 @@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331 + [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339 - return __limit + 1;
10340 + return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344 @@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348 -extern unsigned long arch_align_stack(unsigned long sp);
10349 +#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355 -void stop_this_cpu(void *dummy);
10356 +void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361 index a1fe5c1..ee326d8 100644
10362 --- a/arch/x86/include/asm/thread_info.h
10363 +++ b/arch/x86/include/asm/thread_info.h
10364 @@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368 +#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372 @@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376 - struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380 @@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384 -#ifdef CONFIG_X86_32
10385 - unsigned long previous_esp; /* ESP of the previous stack in
10386 - case of nested (IRQ) stacks
10387 - */
10388 - __u8 supervisor_stack[0];
10389 -#endif
10390 + unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394 -#define INIT_THREAD_INFO(tsk) \
10395 +#define INIT_THREAD_INFO \
10396 { \
10397 - .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401 @@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405 -#define init_thread_info (init_thread_union.thread_info)
10406 +#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410 @@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414 -#ifdef CONFIG_X86_32
10415 -
10416 -#define STACK_WARN (THREAD_SIZE/8)
10417 -/*
10418 - * macros/functions for gaining access to the thread information structure
10419 - *
10420 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10421 - */
10422 -#ifndef __ASSEMBLY__
10423 -
10424 -
10425 -/* how to get the current stack pointer from C */
10426 -register unsigned long current_stack_pointer asm("esp") __used;
10427 -
10428 -/* how to get the thread information struct from C */
10429 -static inline struct thread_info *current_thread_info(void)
10430 -{
10431 - return (struct thread_info *)
10432 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10433 -}
10434 -
10435 -#else /* !__ASSEMBLY__ */
10436 -
10437 +#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440 - movl $-THREAD_SIZE, reg; \
10441 - andl %esp, reg
10442 + mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10446 - andl $-THREAD_SIZE, reg
10447 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448 +#else
10449 +/* how to get the thread information struct from C */
10450 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451 +
10452 +static __always_inline struct thread_info *current_thread_info(void)
10453 +{
10454 + return percpu_read_stable(current_tinfo);
10455 +}
10456 +#endif
10457 +
10458 +#ifdef CONFIG_X86_32
10459 +
10460 +#define STACK_WARN (THREAD_SIZE/8)
10461 +/*
10462 + * macros/functions for gaining access to the thread information structure
10463 + *
10464 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10465 + */
10466 +#ifndef __ASSEMBLY__
10467 +
10468 +/* how to get the current stack pointer from C */
10469 +register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475 -#include <asm/percpu.h>
10476 -#define KERNEL_STACK_OFFSET (5*8)
10477 -
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485 -static inline struct thread_info *current_thread_info(void)
10486 -{
10487 - struct thread_info *ti;
10488 - ti = (void *)(percpu_read_stable(kernel_stack) +
10489 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10490 - return ti;
10491 -}
10492 -
10493 -#else /* !__ASSEMBLY__ */
10494 -
10495 -/* how to get the thread information struct from ASM */
10496 -#define GET_THREAD_INFO(reg) \
10497 - movq PER_CPU_VAR(kernel_stack),reg ; \
10498 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499 -
10500 +/* how to get the current stack pointer from C */
10501 +register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509 +
10510 +#define __HAVE_THREAD_FUNCTIONS
10511 +#define task_thread_info(task) (&(task)->tinfo)
10512 +#define task_stack_page(task) ((task)->stack)
10513 +#define setup_thread_stack(p, org) do {} while (0)
10514 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515 +
10516 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517 +extern struct task_struct *alloc_task_struct_node(int node);
10518 +extern void free_task_struct(struct task_struct *);
10519 +
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523 index 36361bf..324f262 100644
10524 --- a/arch/x86/include/asm/uaccess.h
10525 +++ b/arch/x86/include/asm/uaccess.h
10526 @@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530 +#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538 +
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542 @@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547 +void __set_fs(mm_segment_t x);
10548 +void set_fs(mm_segment_t x);
10549 +#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551 +#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555 @@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561 +#define access_ok(type, addr, size) \
10562 +({ \
10563 + long __size = size; \
10564 + unsigned long __addr = (unsigned long)addr; \
10565 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10566 + unsigned long __end_ao = __addr + __size - 1; \
10567 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569 + while(__addr_ao <= __end_ao) { \
10570 + char __c_ao; \
10571 + __addr_ao += PAGE_SIZE; \
10572 + if (__size > PAGE_SIZE) \
10573 + cond_resched(); \
10574 + if (__get_user(__c_ao, (char __user *)__addr)) \
10575 + break; \
10576 + if (type != VERIFY_WRITE) { \
10577 + __addr = __addr_ao; \
10578 + continue; \
10579 + } \
10580 + if (__put_user(__c_ao, (char __user *)__addr)) \
10581 + break; \
10582 + __addr = __addr_ao; \
10583 + } \
10584 + } \
10585 + __ret_ao; \
10586 +})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594 -
10595 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596 +#define __copyuser_seg "gs;"
10597 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599 +#else
10600 +#define __copyuser_seg
10601 +#define __COPYUSER_SET_ES
10602 +#define __COPYUSER_RESTORE_ES
10603 +#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607 - asm volatile("1: movl %%eax,0(%2)\n" \
10608 - "2: movl %%edx,4(%2)\n" \
10609 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618 - asm volatile("1: movl %%eax,0(%1)\n" \
10619 - "2: movl %%edx,4(%1)\n" \
10620 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629 - __pu_val = x; \
10630 + __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634 @@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643 @@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647 - : "=r" (err), ltype(x) \
10648 + : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652 @@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661 @@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666 + (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672 -#define __m(x) (*(struct __large_struct __user *)(x))
10673 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674 +#define ____m(x) \
10675 +({ \
10676 + unsigned long ____x = (unsigned long)(x); \
10677 + if (____x < PAX_USER_SHADOW_BASE) \
10678 + ____x += PAX_USER_SHADOW_BASE; \
10679 + (void __user *)____x; \
10680 +})
10681 +#else
10682 +#define ____m(x) (x)
10683 +#endif
10684 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715 +#define __get_user(x, ptr) get_user((x), (ptr))
10716 +#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719 +#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728 +#define __put_user(x, ptr) put_user((x), (ptr))
10729 +#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732 +#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741 + (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746 index 566e803..b9521e9 100644
10747 --- a/arch/x86/include/asm/uaccess_32.h
10748 +++ b/arch/x86/include/asm/uaccess_32.h
10749 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753 + if ((long)n < 0)
10754 + return n;
10755 +
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763 + if (!__builtin_constant_p(n))
10764 + check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772 +
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779 + if ((long)n < 0)
10780 + return n;
10781 +
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785 @@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789 +
10790 + if ((long)n < 0)
10791 + return n;
10792 +
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800 + if (!__builtin_constant_p(n))
10801 + check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809 +
10810 + if ((long)n < 0)
10811 + return n;
10812 +
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816 @@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10821 + if ((long)n < 0)
10822 + return n;
10823 +
10824 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827 -unsigned long __must_check copy_to_user(void __user *to,
10828 - const void *from, unsigned long n);
10829 -unsigned long __must_check _copy_from_user(void *to,
10830 - const void __user *from,
10831 - unsigned long n);
10832 -
10833 +extern void copy_to_user_overflow(void)
10834 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835 + __compiletime_error("copy_to_user() buffer size is not provably correct")
10836 +#else
10837 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838 +#endif
10839 +;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847 -static inline unsigned long __must_check copy_from_user(void *to,
10848 - const void __user *from,
10849 - unsigned long n)
10850 +/**
10851 + * copy_to_user: - Copy a block of data into user space.
10852 + * @to: Destination address, in user space.
10853 + * @from: Source address, in kernel space.
10854 + * @n: Number of bytes to copy.
10855 + *
10856 + * Context: User context only. This function may sleep.
10857 + *
10858 + * Copy data from kernel space to user space.
10859 + *
10860 + * Returns number of bytes that could not be copied.
10861 + * On success, this will be zero.
10862 + */
10863 +static inline unsigned long __must_check
10864 +copy_to_user(void __user *to, const void *from, unsigned long n)
10865 +{
10866 + int sz = __compiletime_object_size(from);
10867 +
10868 + if (unlikely(sz != -1 && sz < n))
10869 + copy_to_user_overflow();
10870 + else if (access_ok(VERIFY_WRITE, to, n))
10871 + n = __copy_to_user(to, from, n);
10872 + return n;
10873 +}
10874 +
10875 +/**
10876 + * copy_from_user: - Copy a block of data from user space.
10877 + * @to: Destination address, in kernel space.
10878 + * @from: Source address, in user space.
10879 + * @n: Number of bytes to copy.
10880 + *
10881 + * Context: User context only. This function may sleep.
10882 + *
10883 + * Copy data from user space to kernel space.
10884 + *
10885 + * Returns number of bytes that could not be copied.
10886 + * On success, this will be zero.
10887 + *
10888 + * If some data could not be copied, this function will pad the copied
10889 + * data to the requested size using zero bytes.
10890 + */
10891 +static inline unsigned long __must_check
10892 +copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896 - if (likely(sz == -1 || sz >= n))
10897 - n = _copy_from_user(to, from, n);
10898 - else
10899 + if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901 -
10902 + else if (access_ok(VERIFY_READ, from, n))
10903 + n = __copy_from_user(to, from, n);
10904 + else if ((long)n > 0) {
10905 + if (!__builtin_constant_p(n))
10906 + check_object_size(to, n, false);
10907 + memset(to, 0, n);
10908 + }
10909 return n;
10910 }
10911
10912 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913 index 1c66d30..23ab77d 100644
10914 --- a/arch/x86/include/asm/uaccess_64.h
10915 +++ b/arch/x86/include/asm/uaccess_64.h
10916 @@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920 +#include <asm/pgtable.h>
10921 +
10922 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926 @@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930 -copy_user_generic_string(void *to, const void *from, unsigned len);
10931 +copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937 -copy_user_generic(void *to, const void *from, unsigned len)
10938 +copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942 @@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 return ret;
10944 }
10945
10946 +static __always_inline __must_check unsigned long
10947 +__copy_to_user(void __user *to, const void *from, unsigned long len);
10948 +static __always_inline __must_check unsigned long
10949 +__copy_from_user(void *to, const void __user *from, unsigned long len);
10950 __must_check unsigned long
10951 -_copy_to_user(void __user *to, const void *from, unsigned len);
10952 -__must_check unsigned long
10953 -_copy_from_user(void *to, const void __user *from, unsigned len);
10954 -__must_check unsigned long
10955 -copy_in_user(void __user *to, const void __user *from, unsigned len);
10956 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
10957
10958 static inline unsigned long __must_check copy_from_user(void *to,
10959 const void __user *from,
10960 unsigned long n)
10961 {
10962 - int sz = __compiletime_object_size(to);
10963 -
10964 might_fault();
10965 - if (likely(sz == -1 || sz >= n))
10966 - n = _copy_from_user(to, from, n);
10967 -#ifdef CONFIG_DEBUG_VM
10968 - else
10969 - WARN(1, "Buffer overflow detected!\n");
10970 -#endif
10971 +
10972 + if (access_ok(VERIFY_READ, from, n))
10973 + n = __copy_from_user(to, from, n);
10974 + else if (n < INT_MAX) {
10975 + if (!__builtin_constant_p(n))
10976 + check_object_size(to, n, false);
10977 + memset(to, 0, n);
10978 + }
10979 return n;
10980 }
10981
10982 static __always_inline __must_check
10983 -int copy_to_user(void __user *dst, const void *src, unsigned size)
10984 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
10985 {
10986 might_fault();
10987
10988 - return _copy_to_user(dst, src, size);
10989 + if (access_ok(VERIFY_WRITE, dst, size))
10990 + size = __copy_to_user(dst, src, size);
10991 + return size;
10992 }
10993
10994 static __always_inline __must_check
10995 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10996 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10997 {
10998 - int ret = 0;
10999 + int sz = __compiletime_object_size(dst);
11000 + unsigned ret = 0;
11001
11002 might_fault();
11003 - if (!__builtin_constant_p(size))
11004 - return copy_user_generic(dst, (__force void *)src, size);
11005 +
11006 + if (size > INT_MAX)
11007 + return size;
11008 +
11009 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11010 + if (!__access_ok(VERIFY_READ, src, size))
11011 + return size;
11012 +#endif
11013 +
11014 + if (unlikely(sz != -1 && sz < size)) {
11015 +#ifdef CONFIG_DEBUG_VM
11016 + WARN(1, "Buffer overflow detected!\n");
11017 +#endif
11018 + return size;
11019 + }
11020 +
11021 + if (!__builtin_constant_p(size)) {
11022 + check_object_size(dst, size, false);
11023 +
11024 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11025 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11026 + src += PAX_USER_SHADOW_BASE;
11027 +#endif
11028 +
11029 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11030 + }
11031 switch (size) {
11032 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11033 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11034 ret, "b", "b", "=q", 1);
11035 return ret;
11036 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11037 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11038 ret, "w", "w", "=r", 2);
11039 return ret;
11040 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11041 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11042 ret, "l", "k", "=r", 4);
11043 return ret;
11044 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11045 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11046 ret, "q", "", "=r", 8);
11047 return ret;
11048 case 10:
11049 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 10);
11052 if (unlikely(ret))
11053 return ret;
11054 __get_user_asm(*(u16 *)(8 + (char *)dst),
11055 - (u16 __user *)(8 + (char __user *)src),
11056 + (const u16 __user *)(8 + (const char __user *)src),
11057 ret, "w", "w", "=r", 2);
11058 return ret;
11059 case 16:
11060 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11061 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11062 ret, "q", "", "=r", 16);
11063 if (unlikely(ret))
11064 return ret;
11065 __get_user_asm(*(u64 *)(8 + (char *)dst),
11066 - (u64 __user *)(8 + (char __user *)src),
11067 + (const u64 __user *)(8 + (const char __user *)src),
11068 ret, "q", "", "=r", 8);
11069 return ret;
11070 default:
11071 - return copy_user_generic(dst, (__force void *)src, size);
11072 +
11073 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11074 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11075 + src += PAX_USER_SHADOW_BASE;
11076 +#endif
11077 +
11078 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11079 }
11080 }
11081
11082 static __always_inline __must_check
11083 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11084 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11085 {
11086 - int ret = 0;
11087 + int sz = __compiletime_object_size(src);
11088 + unsigned ret = 0;
11089
11090 might_fault();
11091 - if (!__builtin_constant_p(size))
11092 - return copy_user_generic((__force void *)dst, src, size);
11093 +
11094 + if (size > INT_MAX)
11095 + return size;
11096 +
11097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11098 + if (!__access_ok(VERIFY_WRITE, dst, size))
11099 + return size;
11100 +#endif
11101 +
11102 + if (unlikely(sz != -1 && sz < size)) {
11103 +#ifdef CONFIG_DEBUG_VM
11104 + WARN(1, "Buffer overflow detected!\n");
11105 +#endif
11106 + return size;
11107 + }
11108 +
11109 + if (!__builtin_constant_p(size)) {
11110 + check_object_size(src, size, true);
11111 +
11112 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11113 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114 + dst += PAX_USER_SHADOW_BASE;
11115 +#endif
11116 +
11117 + return copy_user_generic((__force_kernel void *)dst, src, size);
11118 + }
11119 switch (size) {
11120 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11121 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11122 ret, "b", "b", "iq", 1);
11123 return ret;
11124 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11125 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11126 ret, "w", "w", "ir", 2);
11127 return ret;
11128 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11129 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11130 ret, "l", "k", "ir", 4);
11131 return ret;
11132 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11133 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11134 ret, "q", "", "er", 8);
11135 return ret;
11136 case 10:
11137 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 10);
11140 if (unlikely(ret))
11141 return ret;
11142 asm("":::"memory");
11143 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11144 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11145 ret, "w", "w", "ir", 2);
11146 return ret;
11147 case 16:
11148 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11149 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11150 ret, "q", "", "er", 16);
11151 if (unlikely(ret))
11152 return ret;
11153 asm("":::"memory");
11154 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11155 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11156 ret, "q", "", "er", 8);
11157 return ret;
11158 default:
11159 - return copy_user_generic((__force void *)dst, src, size);
11160 +
11161 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11162 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11163 + dst += PAX_USER_SHADOW_BASE;
11164 +#endif
11165 +
11166 + return copy_user_generic((__force_kernel void *)dst, src, size);
11167 }
11168 }
11169
11170 static __always_inline __must_check
11171 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11172 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11173 {
11174 - int ret = 0;
11175 + unsigned ret = 0;
11176
11177 might_fault();
11178 - if (!__builtin_constant_p(size))
11179 - return copy_user_generic((__force void *)dst,
11180 - (__force void *)src, size);
11181 +
11182 + if (size > INT_MAX)
11183 + return size;
11184 +
11185 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11186 + if (!__access_ok(VERIFY_READ, src, size))
11187 + return size;
11188 + if (!__access_ok(VERIFY_WRITE, dst, size))
11189 + return size;
11190 +#endif
11191 +
11192 + if (!__builtin_constant_p(size)) {
11193 +
11194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11195 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11196 + src += PAX_USER_SHADOW_BASE;
11197 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11198 + dst += PAX_USER_SHADOW_BASE;
11199 +#endif
11200 +
11201 + return copy_user_generic((__force_kernel void *)dst,
11202 + (__force_kernel const void *)src, size);
11203 + }
11204 switch (size) {
11205 case 1: {
11206 u8 tmp;
11207 - __get_user_asm(tmp, (u8 __user *)src,
11208 + __get_user_asm(tmp, (const u8 __user *)src,
11209 ret, "b", "b", "=q", 1);
11210 if (likely(!ret))
11211 __put_user_asm(tmp, (u8 __user *)dst,
11212 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11213 }
11214 case 2: {
11215 u16 tmp;
11216 - __get_user_asm(tmp, (u16 __user *)src,
11217 + __get_user_asm(tmp, (const u16 __user *)src,
11218 ret, "w", "w", "=r", 2);
11219 if (likely(!ret))
11220 __put_user_asm(tmp, (u16 __user *)dst,
11221 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11222
11223 case 4: {
11224 u32 tmp;
11225 - __get_user_asm(tmp, (u32 __user *)src,
11226 + __get_user_asm(tmp, (const u32 __user *)src,
11227 ret, "l", "k", "=r", 4);
11228 if (likely(!ret))
11229 __put_user_asm(tmp, (u32 __user *)dst,
11230 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11231 }
11232 case 8: {
11233 u64 tmp;
11234 - __get_user_asm(tmp, (u64 __user *)src,
11235 + __get_user_asm(tmp, (const u64 __user *)src,
11236 ret, "q", "", "=r", 8);
11237 if (likely(!ret))
11238 __put_user_asm(tmp, (u64 __user *)dst,
11239 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11240 return ret;
11241 }
11242 default:
11243 - return copy_user_generic((__force void *)dst,
11244 - (__force void *)src, size);
11245 +
11246 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11247 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11248 + src += PAX_USER_SHADOW_BASE;
11249 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11250 + dst += PAX_USER_SHADOW_BASE;
11251 +#endif
11252 +
11253 + return copy_user_generic((__force_kernel void *)dst,
11254 + (__force_kernel const void *)src, size);
11255 }
11256 }
11257
11258 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11259 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11260
11261 static __must_check __always_inline int
11262 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11263 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11264 {
11265 - return copy_user_generic(dst, (__force const void *)src, size);
11266 + if (size > INT_MAX)
11267 + return size;
11268 +
11269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11270 + if (!__access_ok(VERIFY_READ, src, size))
11271 + return size;
11272 +
11273 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274 + src += PAX_USER_SHADOW_BASE;
11275 +#endif
11276 +
11277 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 }
11279
11280 -static __must_check __always_inline int
11281 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11282 +static __must_check __always_inline unsigned long
11283 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11284 {
11285 - return copy_user_generic((__force void *)dst, src, size);
11286 + if (size > INT_MAX)
11287 + return size;
11288 +
11289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11290 + if (!__access_ok(VERIFY_WRITE, dst, size))
11291 + return size;
11292 +
11293 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11294 + dst += PAX_USER_SHADOW_BASE;
11295 +#endif
11296 +
11297 + return copy_user_generic((__force_kernel void *)dst, src, size);
11298 }
11299
11300 -extern long __copy_user_nocache(void *dst, const void __user *src,
11301 - unsigned size, int zerorest);
11302 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11303 + unsigned long size, int zerorest);
11304
11305 -static inline int
11306 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11307 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11308 {
11309 might_sleep();
11310 +
11311 + if (size > INT_MAX)
11312 + return size;
11313 +
11314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11315 + if (!__access_ok(VERIFY_READ, src, size))
11316 + return size;
11317 +#endif
11318 +
11319 return __copy_user_nocache(dst, src, size, 1);
11320 }
11321
11322 -static inline int
11323 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11324 - unsigned size)
11325 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11326 + unsigned long size)
11327 {
11328 + if (size > INT_MAX)
11329 + return size;
11330 +
11331 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11332 + if (!__access_ok(VERIFY_READ, src, size))
11333 + return size;
11334 +#endif
11335 +
11336 return __copy_user_nocache(dst, src, size, 0);
11337 }
11338
11339 -unsigned long
11340 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11341 +extern unsigned long
11342 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11343
11344 #endif /* _ASM_X86_UACCESS_64_H */
11345 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11346 index bb05228..d763d5b 100644
11347 --- a/arch/x86/include/asm/vdso.h
11348 +++ b/arch/x86/include/asm/vdso.h
11349 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11350 #define VDSO32_SYMBOL(base, name) \
11351 ({ \
11352 extern const char VDSO32_##name[]; \
11353 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11354 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11355 })
11356 #endif
11357
11358 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11359 index 1971e65..1e3559b 100644
11360 --- a/arch/x86/include/asm/x86_init.h
11361 +++ b/arch/x86/include/asm/x86_init.h
11362 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11363 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11364 void (*find_smp_config)(void);
11365 void (*get_smp_config)(unsigned int early);
11366 -};
11367 +} __no_const;
11368
11369 /**
11370 * struct x86_init_resources - platform specific resource related ops
11371 @@ -42,7 +42,7 @@ struct x86_init_resources {
11372 void (*probe_roms)(void);
11373 void (*reserve_resources)(void);
11374 char *(*memory_setup)(void);
11375 -};
11376 +} __no_const;
11377
11378 /**
11379 * struct x86_init_irqs - platform specific interrupt setup
11380 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11381 void (*pre_vector_init)(void);
11382 void (*intr_init)(void);
11383 void (*trap_init)(void);
11384 -};
11385 +} __no_const;
11386
11387 /**
11388 * struct x86_init_oem - oem platform specific customizing functions
11389 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11390 struct x86_init_oem {
11391 void (*arch_setup)(void);
11392 void (*banner)(void);
11393 -};
11394 +} __no_const;
11395
11396 /**
11397 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11398 @@ -76,7 +76,7 @@ struct x86_init_oem {
11399 */
11400 struct x86_init_mapping {
11401 void (*pagetable_reserve)(u64 start, u64 end);
11402 -};
11403 +} __no_const;
11404
11405 /**
11406 * struct x86_init_paging - platform specific paging functions
11407 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11408 struct x86_init_paging {
11409 void (*pagetable_setup_start)(pgd_t *base);
11410 void (*pagetable_setup_done)(pgd_t *base);
11411 -};
11412 +} __no_const;
11413
11414 /**
11415 * struct x86_init_timers - platform specific timer setup
11416 @@ -101,7 +101,7 @@ struct x86_init_timers {
11417 void (*tsc_pre_init)(void);
11418 void (*timer_init)(void);
11419 void (*wallclock_init)(void);
11420 -};
11421 +} __no_const;
11422
11423 /**
11424 * struct x86_init_iommu - platform specific iommu setup
11425 @@ -109,7 +109,7 @@ struct x86_init_timers {
11426 */
11427 struct x86_init_iommu {
11428 int (*iommu_init)(void);
11429 -};
11430 +} __no_const;
11431
11432 /**
11433 * struct x86_init_pci - platform specific pci init functions
11434 @@ -123,7 +123,7 @@ struct x86_init_pci {
11435 int (*init)(void);
11436 void (*init_irq)(void);
11437 void (*fixup_irqs)(void);
11438 -};
11439 +} __no_const;
11440
11441 /**
11442 * struct x86_init_ops - functions for platform specific setup
11443 @@ -139,7 +139,7 @@ struct x86_init_ops {
11444 struct x86_init_timers timers;
11445 struct x86_init_iommu iommu;
11446 struct x86_init_pci pci;
11447 -};
11448 +} __no_const;
11449
11450 /**
11451 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11452 @@ -147,7 +147,7 @@ struct x86_init_ops {
11453 */
11454 struct x86_cpuinit_ops {
11455 void (*setup_percpu_clockev)(void);
11456 -};
11457 +} __no_const;
11458
11459 /**
11460 * struct x86_platform_ops - platform specific runtime functions
11461 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11462 void (*nmi_init)(void);
11463 unsigned char (*get_nmi_reason)(void);
11464 int (*i8042_detect)(void);
11465 -};
11466 +} __no_const;
11467
11468 struct pci_dev;
11469
11470 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11471 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11472 void (*teardown_msi_irq)(unsigned int irq);
11473 void (*teardown_msi_irqs)(struct pci_dev *dev);
11474 -};
11475 +} __no_const;
11476
11477 extern struct x86_init_ops x86_init;
11478 extern struct x86_cpuinit_ops x86_cpuinit;
11479 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11480 index c6ce245..ffbdab7 100644
11481 --- a/arch/x86/include/asm/xsave.h
11482 +++ b/arch/x86/include/asm/xsave.h
11483 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11484 {
11485 int err;
11486
11487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11489 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11490 +#endif
11491 +
11492 /*
11493 * Clear the xsave header first, so that reserved fields are
11494 * initialized to zero.
11495 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11496 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11497 {
11498 int err;
11499 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11500 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11501 u32 lmask = mask;
11502 u32 hmask = mask >> 32;
11503
11504 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11505 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11506 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11507 +#endif
11508 +
11509 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11510 "2:\n"
11511 ".section .fixup,\"ax\"\n"
11512 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11513 index 6a564ac..9b1340c 100644
11514 --- a/arch/x86/kernel/acpi/realmode/Makefile
11515 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11516 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11517 $(call cc-option, -fno-stack-protector) \
11518 $(call cc-option, -mpreferred-stack-boundary=2)
11519 KBUILD_CFLAGS += $(call cc-option, -m32)
11520 +ifdef CONSTIFY_PLUGIN
11521 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11522 +endif
11523 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11524 GCOV_PROFILE := n
11525
11526 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11527 index b4fd836..4358fe3 100644
11528 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11529 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11530 @@ -108,6 +108,9 @@ wakeup_code:
11531 /* Do any other stuff... */
11532
11533 #ifndef CONFIG_64BIT
11534 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11535 + call verify_cpu
11536 +
11537 /* This could also be done in C code... */
11538 movl pmode_cr3, %eax
11539 movl %eax, %cr3
11540 @@ -131,6 +134,7 @@ wakeup_code:
11541 movl pmode_cr0, %eax
11542 movl %eax, %cr0
11543 jmp pmode_return
11544 +# include "../../verify_cpu.S"
11545 #else
11546 pushw $0
11547 pushw trampoline_segment
11548 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11549 index 103b6ab..2004d0a 100644
11550 --- a/arch/x86/kernel/acpi/sleep.c
11551 +++ b/arch/x86/kernel/acpi/sleep.c
11552 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11553 header->trampoline_segment = trampoline_address() >> 4;
11554 #ifdef CONFIG_SMP
11555 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11556 +
11557 + pax_open_kernel();
11558 early_gdt_descr.address =
11559 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11560 + pax_close_kernel();
11561 +
11562 initial_gs = per_cpu_offset(smp_processor_id());
11563 #endif
11564 initial_code = (unsigned long)wakeup_long64;
11565 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11566 index 13ab720..95d5442 100644
11567 --- a/arch/x86/kernel/acpi/wakeup_32.S
11568 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11569 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11570 # and restore the stack ... but you need gdt for this to work
11571 movl saved_context_esp, %esp
11572
11573 - movl %cs:saved_magic, %eax
11574 - cmpl $0x12345678, %eax
11575 + cmpl $0x12345678, saved_magic
11576 jne bogus_magic
11577
11578 # jump to place where we left off
11579 - movl saved_eip, %eax
11580 - jmp *%eax
11581 + jmp *(saved_eip)
11582
11583 bogus_magic:
11584 jmp bogus_magic
11585 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11586 index 1f84794..e23f862 100644
11587 --- a/arch/x86/kernel/alternative.c
11588 +++ b/arch/x86/kernel/alternative.c
11589 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11590 */
11591 for (a = start; a < end; a++) {
11592 instr = (u8 *)&a->instr_offset + a->instr_offset;
11593 +
11594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11595 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11596 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11597 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11598 +#endif
11599 +
11600 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11601 BUG_ON(a->replacementlen > a->instrlen);
11602 BUG_ON(a->instrlen > sizeof(insnbuf));
11603 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11604 for (poff = start; poff < end; poff++) {
11605 u8 *ptr = (u8 *)poff + *poff;
11606
11607 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11608 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11609 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11610 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11611 +#endif
11612 +
11613 if (!*poff || ptr < text || ptr >= text_end)
11614 continue;
11615 /* turn DS segment override prefix into lock prefix */
11616 - if (*ptr == 0x3e)
11617 + if (*ktla_ktva(ptr) == 0x3e)
11618 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11619 };
11620 mutex_unlock(&text_mutex);
11621 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11622 for (poff = start; poff < end; poff++) {
11623 u8 *ptr = (u8 *)poff + *poff;
11624
11625 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11626 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11627 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11628 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11629 +#endif
11630 +
11631 if (!*poff || ptr < text || ptr >= text_end)
11632 continue;
11633 /* turn lock prefix into DS segment override prefix */
11634 - if (*ptr == 0xf0)
11635 + if (*ktla_ktva(ptr) == 0xf0)
11636 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11637 };
11638 mutex_unlock(&text_mutex);
11639 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11640
11641 BUG_ON(p->len > MAX_PATCH_LEN);
11642 /* prep the buffer with the original instructions */
11643 - memcpy(insnbuf, p->instr, p->len);
11644 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11645 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11646 (unsigned long)p->instr, p->len);
11647
11648 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11649 if (smp_alt_once)
11650 free_init_pages("SMP alternatives",
11651 (unsigned long)__smp_locks,
11652 - (unsigned long)__smp_locks_end);
11653 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11654
11655 restart_nmi();
11656 }
11657 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11658 * instructions. And on the local CPU you need to be protected again NMI or MCE
11659 * handlers seeing an inconsistent instruction while you patch.
11660 */
11661 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11662 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11663 size_t len)
11664 {
11665 unsigned long flags;
11666 local_irq_save(flags);
11667 - memcpy(addr, opcode, len);
11668 +
11669 + pax_open_kernel();
11670 + memcpy(ktla_ktva(addr), opcode, len);
11671 sync_core();
11672 + pax_close_kernel();
11673 +
11674 local_irq_restore(flags);
11675 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11676 that causes hangs on some VIA CPUs. */
11677 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11678 */
11679 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11680 {
11681 - unsigned long flags;
11682 - char *vaddr;
11683 + unsigned char *vaddr = ktla_ktva(addr);
11684 struct page *pages[2];
11685 - int i;
11686 + size_t i;
11687
11688 if (!core_kernel_text((unsigned long)addr)) {
11689 - pages[0] = vmalloc_to_page(addr);
11690 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11691 + pages[0] = vmalloc_to_page(vaddr);
11692 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11693 } else {
11694 - pages[0] = virt_to_page(addr);
11695 + pages[0] = virt_to_page(vaddr);
11696 WARN_ON(!PageReserved(pages[0]));
11697 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11698 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11699 }
11700 BUG_ON(!pages[0]);
11701 - local_irq_save(flags);
11702 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11703 - if (pages[1])
11704 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11705 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11706 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11707 - clear_fixmap(FIX_TEXT_POKE0);
11708 - if (pages[1])
11709 - clear_fixmap(FIX_TEXT_POKE1);
11710 - local_flush_tlb();
11711 - sync_core();
11712 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11713 - that causes hangs on some VIA CPUs. */
11714 + text_poke_early(addr, opcode, len);
11715 for (i = 0; i < len; i++)
11716 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11717 - local_irq_restore(flags);
11718 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11719 return addr;
11720 }
11721
11722 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11723 index f98d84c..e402a69 100644
11724 --- a/arch/x86/kernel/apic/apic.c
11725 +++ b/arch/x86/kernel/apic/apic.c
11726 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11727 /*
11728 * Debug level, exported for io_apic.c
11729 */
11730 -unsigned int apic_verbosity;
11731 +int apic_verbosity;
11732
11733 int pic_mode;
11734
11735 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11736 apic_write(APIC_ESR, 0);
11737 v1 = apic_read(APIC_ESR);
11738 ack_APIC_irq();
11739 - atomic_inc(&irq_err_count);
11740 + atomic_inc_unchecked(&irq_err_count);
11741
11742 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11743 smp_processor_id(), v0 , v1);
11744 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11745 index 6d939d7..0697fcc 100644
11746 --- a/arch/x86/kernel/apic/io_apic.c
11747 +++ b/arch/x86/kernel/apic/io_apic.c
11748 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11749 }
11750 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11751
11752 -void lock_vector_lock(void)
11753 +void lock_vector_lock(void) __acquires(vector_lock)
11754 {
11755 /* Used to the online set of cpus does not change
11756 * during assign_irq_vector.
11757 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11758 raw_spin_lock(&vector_lock);
11759 }
11760
11761 -void unlock_vector_lock(void)
11762 +void unlock_vector_lock(void) __releases(vector_lock)
11763 {
11764 raw_spin_unlock(&vector_lock);
11765 }
11766 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11767 ack_APIC_irq();
11768 }
11769
11770 -atomic_t irq_mis_count;
11771 +atomic_unchecked_t irq_mis_count;
11772
11773 static void ack_apic_level(struct irq_data *data)
11774 {
11775 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11776 * at the cpu.
11777 */
11778 if (!(v & (1 << (i & 0x1f)))) {
11779 - atomic_inc(&irq_mis_count);
11780 + atomic_inc_unchecked(&irq_mis_count);
11781
11782 eoi_ioapic_irq(irq, cfg);
11783 }
11784 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11785 index a46bd38..6b906d7 100644
11786 --- a/arch/x86/kernel/apm_32.c
11787 +++ b/arch/x86/kernel/apm_32.c
11788 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11789 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11790 * even though they are called in protected mode.
11791 */
11792 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11793 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11794 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11795
11796 static const char driver_version[] = "1.16ac"; /* no spaces */
11797 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11798 BUG_ON(cpu != 0);
11799 gdt = get_cpu_gdt_table(cpu);
11800 save_desc_40 = gdt[0x40 / 8];
11801 +
11802 + pax_open_kernel();
11803 gdt[0x40 / 8] = bad_bios_desc;
11804 + pax_close_kernel();
11805
11806 apm_irq_save(flags);
11807 APM_DO_SAVE_SEGS;
11808 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11809 &call->esi);
11810 APM_DO_RESTORE_SEGS;
11811 apm_irq_restore(flags);
11812 +
11813 + pax_open_kernel();
11814 gdt[0x40 / 8] = save_desc_40;
11815 + pax_close_kernel();
11816 +
11817 put_cpu();
11818
11819 return call->eax & 0xff;
11820 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11821 BUG_ON(cpu != 0);
11822 gdt = get_cpu_gdt_table(cpu);
11823 save_desc_40 = gdt[0x40 / 8];
11824 +
11825 + pax_open_kernel();
11826 gdt[0x40 / 8] = bad_bios_desc;
11827 + pax_close_kernel();
11828
11829 apm_irq_save(flags);
11830 APM_DO_SAVE_SEGS;
11831 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11832 &call->eax);
11833 APM_DO_RESTORE_SEGS;
11834 apm_irq_restore(flags);
11835 +
11836 + pax_open_kernel();
11837 gdt[0x40 / 8] = save_desc_40;
11838 + pax_close_kernel();
11839 +
11840 put_cpu();
11841 return error;
11842 }
11843 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11844 * code to that CPU.
11845 */
11846 gdt = get_cpu_gdt_table(0);
11847 +
11848 + pax_open_kernel();
11849 set_desc_base(&gdt[APM_CS >> 3],
11850 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11851 set_desc_base(&gdt[APM_CS_16 >> 3],
11852 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11853 set_desc_base(&gdt[APM_DS >> 3],
11854 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11855 + pax_close_kernel();
11856
11857 proc_create("apm", 0, NULL, &apm_file_ops);
11858
11859 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11860 index 4f13faf..87db5d2 100644
11861 --- a/arch/x86/kernel/asm-offsets.c
11862 +++ b/arch/x86/kernel/asm-offsets.c
11863 @@ -33,6 +33,8 @@ void common(void) {
11864 OFFSET(TI_status, thread_info, status);
11865 OFFSET(TI_addr_limit, thread_info, addr_limit);
11866 OFFSET(TI_preempt_count, thread_info, preempt_count);
11867 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11868 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11869
11870 BLANK();
11871 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11872 @@ -53,8 +55,26 @@ void common(void) {
11873 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11874 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11875 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11876 +
11877 +#ifdef CONFIG_PAX_KERNEXEC
11878 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11879 #endif
11880
11881 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11882 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11883 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11884 +#ifdef CONFIG_X86_64
11885 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11886 +#endif
11887 +#endif
11888 +
11889 +#endif
11890 +
11891 + BLANK();
11892 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11893 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11894 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11895 +
11896 #ifdef CONFIG_XEN
11897 BLANK();
11898 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11899 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11900 index e72a119..6e2955d 100644
11901 --- a/arch/x86/kernel/asm-offsets_64.c
11902 +++ b/arch/x86/kernel/asm-offsets_64.c
11903 @@ -69,6 +69,7 @@ int main(void)
11904 BLANK();
11905 #undef ENTRY
11906
11907 + DEFINE(TSS_size, sizeof(struct tss_struct));
11908 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11909 BLANK();
11910
11911 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11912 index 25f24dc..4094a7f 100644
11913 --- a/arch/x86/kernel/cpu/Makefile
11914 +++ b/arch/x86/kernel/cpu/Makefile
11915 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11916 CFLAGS_REMOVE_perf_event.o = -pg
11917 endif
11918
11919 -# Make sure load_percpu_segment has no stackprotector
11920 -nostackp := $(call cc-option, -fno-stack-protector)
11921 -CFLAGS_common.o := $(nostackp)
11922 -
11923 obj-y := intel_cacheinfo.o scattered.o topology.o
11924 obj-y += proc.o capflags.o powerflags.o common.o
11925 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11926 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11927 index 0bab2b1..d0a1bf8 100644
11928 --- a/arch/x86/kernel/cpu/amd.c
11929 +++ b/arch/x86/kernel/cpu/amd.c
11930 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11931 unsigned int size)
11932 {
11933 /* AMD errata T13 (order #21922) */
11934 - if ((c->x86 == 6)) {
11935 + if (c->x86 == 6) {
11936 /* Duron Rev A0 */
11937 if (c->x86_model == 3 && c->x86_mask == 0)
11938 size = 64;
11939 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11940 index aa003b1..47ea638 100644
11941 --- a/arch/x86/kernel/cpu/common.c
11942 +++ b/arch/x86/kernel/cpu/common.c
11943 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11944
11945 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11946
11947 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11948 -#ifdef CONFIG_X86_64
11949 - /*
11950 - * We need valid kernel segments for data and code in long mode too
11951 - * IRET will check the segment types kkeil 2000/10/28
11952 - * Also sysret mandates a special GDT layout
11953 - *
11954 - * TLS descriptors are currently at a different place compared to i386.
11955 - * Hopefully nobody expects them at a fixed place (Wine?)
11956 - */
11957 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11958 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11959 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11960 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11961 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11962 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11963 -#else
11964 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11965 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11966 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11967 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11968 - /*
11969 - * Segments used for calling PnP BIOS have byte granularity.
11970 - * They code segments and data segments have fixed 64k limits,
11971 - * the transfer segment sizes are set at run time.
11972 - */
11973 - /* 32-bit code */
11974 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11975 - /* 16-bit code */
11976 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11977 - /* 16-bit data */
11978 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11979 - /* 16-bit data */
11980 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11981 - /* 16-bit data */
11982 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11983 - /*
11984 - * The APM segments have byte granularity and their bases
11985 - * are set at run time. All have 64k limits.
11986 - */
11987 - /* 32-bit code */
11988 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11989 - /* 16-bit code */
11990 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11991 - /* data */
11992 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11993 -
11994 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11995 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11996 - GDT_STACK_CANARY_INIT
11997 -#endif
11998 -} };
11999 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12000 -
12001 static int __init x86_xsave_setup(char *s)
12002 {
12003 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12004 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12005 {
12006 struct desc_ptr gdt_descr;
12007
12008 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12009 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12010 gdt_descr.size = GDT_SIZE - 1;
12011 load_gdt(&gdt_descr);
12012 /* Reload the per-cpu base */
12013 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12014 /* Filter out anything that depends on CPUID levels we don't have */
12015 filter_cpuid_features(c, true);
12016
12017 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12018 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12019 +#endif
12020 +
12021 /* If the model name is still unset, do table lookup. */
12022 if (!c->x86_model_id[0]) {
12023 const char *p;
12024 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12025 }
12026 __setup("clearcpuid=", setup_disablecpuid);
12027
12028 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12029 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12030 +
12031 #ifdef CONFIG_X86_64
12032 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12033
12034 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12035 EXPORT_PER_CPU_SYMBOL(current_task);
12036
12037 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12038 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12039 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12040 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12041
12042 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12043 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12044 {
12045 memset(regs, 0, sizeof(struct pt_regs));
12046 regs->fs = __KERNEL_PERCPU;
12047 - regs->gs = __KERNEL_STACK_CANARY;
12048 + savesegment(gs, regs->gs);
12049
12050 return regs;
12051 }
12052 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12053 int i;
12054
12055 cpu = stack_smp_processor_id();
12056 - t = &per_cpu(init_tss, cpu);
12057 + t = init_tss + cpu;
12058 oist = &per_cpu(orig_ist, cpu);
12059
12060 #ifdef CONFIG_NUMA
12061 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12062 switch_to_new_gdt(cpu);
12063 loadsegment(fs, 0);
12064
12065 - load_idt((const struct desc_ptr *)&idt_descr);
12066 + load_idt(&idt_descr);
12067
12068 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12069 syscall_init();
12070 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12071 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12072 barrier();
12073
12074 - x86_configure_nx();
12075 if (cpu != 0)
12076 enable_x2apic();
12077
12078 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12079 {
12080 int cpu = smp_processor_id();
12081 struct task_struct *curr = current;
12082 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12083 + struct tss_struct *t = init_tss + cpu;
12084 struct thread_struct *thread = &curr->thread;
12085
12086 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12087 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12088 index 5231312..a78a987 100644
12089 --- a/arch/x86/kernel/cpu/intel.c
12090 +++ b/arch/x86/kernel/cpu/intel.c
12091 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12092 * Update the IDT descriptor and reload the IDT so that
12093 * it uses the read-only mapped virtual address.
12094 */
12095 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12096 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12097 load_idt(&idt_descr);
12098 }
12099 #endif
12100 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12101 index 2af127d..8ff7ac0 100644
12102 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12103 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12104 @@ -42,6 +42,7 @@
12105 #include <asm/processor.h>
12106 #include <asm/mce.h>
12107 #include <asm/msr.h>
12108 +#include <asm/local.h>
12109
12110 #include "mce-internal.h"
12111
12112 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12113 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12114 m->cs, m->ip);
12115
12116 - if (m->cs == __KERNEL_CS)
12117 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12118 print_symbol("{%s}", m->ip);
12119 pr_cont("\n");
12120 }
12121 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12122
12123 #define PANIC_TIMEOUT 5 /* 5 seconds */
12124
12125 -static atomic_t mce_paniced;
12126 +static atomic_unchecked_t mce_paniced;
12127
12128 static int fake_panic;
12129 -static atomic_t mce_fake_paniced;
12130 +static atomic_unchecked_t mce_fake_paniced;
12131
12132 /* Panic in progress. Enable interrupts and wait for final IPI */
12133 static void wait_for_panic(void)
12134 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12135 /*
12136 * Make sure only one CPU runs in machine check panic
12137 */
12138 - if (atomic_inc_return(&mce_paniced) > 1)
12139 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12140 wait_for_panic();
12141 barrier();
12142
12143 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12144 console_verbose();
12145 } else {
12146 /* Don't log too much for fake panic */
12147 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12148 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12149 return;
12150 }
12151 /* First print corrected ones that are still unlogged */
12152 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12153 * might have been modified by someone else.
12154 */
12155 rmb();
12156 - if (atomic_read(&mce_paniced))
12157 + if (atomic_read_unchecked(&mce_paniced))
12158 wait_for_panic();
12159 if (!monarch_timeout)
12160 goto out;
12161 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12162 }
12163
12164 /* Call the installed machine check handler for this CPU setup. */
12165 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12166 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12167 unexpected_machine_check;
12168
12169 /*
12170 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12171 return;
12172 }
12173
12174 + pax_open_kernel();
12175 machine_check_vector = do_machine_check;
12176 + pax_close_kernel();
12177
12178 __mcheck_cpu_init_generic();
12179 __mcheck_cpu_init_vendor(c);
12180 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12181 */
12182
12183 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12184 -static int mce_chrdev_open_count; /* #times opened */
12185 +static local_t mce_chrdev_open_count; /* #times opened */
12186 static int mce_chrdev_open_exclu; /* already open exclusive? */
12187
12188 static int mce_chrdev_open(struct inode *inode, struct file *file)
12189 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12190 spin_lock(&mce_chrdev_state_lock);
12191
12192 if (mce_chrdev_open_exclu ||
12193 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12194 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12195 spin_unlock(&mce_chrdev_state_lock);
12196
12197 return -EBUSY;
12198 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12199
12200 if (file->f_flags & O_EXCL)
12201 mce_chrdev_open_exclu = 1;
12202 - mce_chrdev_open_count++;
12203 + local_inc(&mce_chrdev_open_count);
12204
12205 spin_unlock(&mce_chrdev_state_lock);
12206
12207 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12208 {
12209 spin_lock(&mce_chrdev_state_lock);
12210
12211 - mce_chrdev_open_count--;
12212 + local_dec(&mce_chrdev_open_count);
12213 mce_chrdev_open_exclu = 0;
12214
12215 spin_unlock(&mce_chrdev_state_lock);
12216 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12217 static void mce_reset(void)
12218 {
12219 cpu_missing = 0;
12220 - atomic_set(&mce_fake_paniced, 0);
12221 + atomic_set_unchecked(&mce_fake_paniced, 0);
12222 atomic_set(&mce_executing, 0);
12223 atomic_set(&mce_callin, 0);
12224 atomic_set(&global_nwo, 0);
12225 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12226 index 5c0e653..0882b0a 100644
12227 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12228 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12229 @@ -12,6 +12,7 @@
12230 #include <asm/system.h>
12231 #include <asm/mce.h>
12232 #include <asm/msr.h>
12233 +#include <asm/pgtable.h>
12234
12235 /* By default disabled */
12236 int mce_p5_enabled __read_mostly;
12237 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12238 if (!cpu_has(c, X86_FEATURE_MCE))
12239 return;
12240
12241 + pax_open_kernel();
12242 machine_check_vector = pentium_machine_check;
12243 + pax_close_kernel();
12244 /* Make sure the vector pointer is visible before we enable MCEs: */
12245 wmb();
12246
12247 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12248 index 54060f5..c1a7577 100644
12249 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12250 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12251 @@ -11,6 +11,7 @@
12252 #include <asm/system.h>
12253 #include <asm/mce.h>
12254 #include <asm/msr.h>
12255 +#include <asm/pgtable.h>
12256
12257 /* Machine check handler for WinChip C6: */
12258 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12259 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12260 {
12261 u32 lo, hi;
12262
12263 + pax_open_kernel();
12264 machine_check_vector = winchip_machine_check;
12265 + pax_close_kernel();
12266 /* Make sure the vector pointer is visible before we enable MCEs: */
12267 wmb();
12268
12269 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12270 index 6b96110..0da73eb 100644
12271 --- a/arch/x86/kernel/cpu/mtrr/main.c
12272 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12273 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12274 u64 size_or_mask, size_and_mask;
12275 static bool mtrr_aps_delayed_init;
12276
12277 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12278 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12279
12280 const struct mtrr_ops *mtrr_if;
12281
12282 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12283 index df5e41f..816c719 100644
12284 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12285 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12286 @@ -25,7 +25,7 @@ struct mtrr_ops {
12287 int (*validate_add_page)(unsigned long base, unsigned long size,
12288 unsigned int type);
12289 int (*have_wrcomb)(void);
12290 -};
12291 +} __do_const;
12292
12293 extern int generic_get_free_region(unsigned long base, unsigned long size,
12294 int replace_reg);
12295 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12296 index 2bda212..78cc605 100644
12297 --- a/arch/x86/kernel/cpu/perf_event.c
12298 +++ b/arch/x86/kernel/cpu/perf_event.c
12299 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12300 break;
12301
12302 perf_callchain_store(entry, frame.return_address);
12303 - fp = frame.next_frame;
12304 + fp = (const void __force_user *)frame.next_frame;
12305 }
12306 }
12307
12308 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12309 index 13ad899..f642b9a 100644
12310 --- a/arch/x86/kernel/crash.c
12311 +++ b/arch/x86/kernel/crash.c
12312 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12313 {
12314 #ifdef CONFIG_X86_32
12315 struct pt_regs fixed_regs;
12316 -#endif
12317
12318 -#ifdef CONFIG_X86_32
12319 - if (!user_mode_vm(regs)) {
12320 + if (!user_mode(regs)) {
12321 crash_fixup_ss_esp(&fixed_regs, regs);
12322 regs = &fixed_regs;
12323 }
12324 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12325 index 37250fe..bf2ec74 100644
12326 --- a/arch/x86/kernel/doublefault_32.c
12327 +++ b/arch/x86/kernel/doublefault_32.c
12328 @@ -11,7 +11,7 @@
12329
12330 #define DOUBLEFAULT_STACKSIZE (1024)
12331 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12332 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12333 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12334
12335 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12336
12337 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12338 unsigned long gdt, tss;
12339
12340 store_gdt(&gdt_desc);
12341 - gdt = gdt_desc.address;
12342 + gdt = (unsigned long)gdt_desc.address;
12343
12344 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12345
12346 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12347 /* 0x2 bit is always set */
12348 .flags = X86_EFLAGS_SF | 0x2,
12349 .sp = STACK_START,
12350 - .es = __USER_DS,
12351 + .es = __KERNEL_DS,
12352 .cs = __KERNEL_CS,
12353 .ss = __KERNEL_DS,
12354 - .ds = __USER_DS,
12355 + .ds = __KERNEL_DS,
12356 .fs = __KERNEL_PERCPU,
12357
12358 .__cr3 = __pa_nodebug(swapper_pg_dir),
12359 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12360 index 1aae78f..aab3a3d 100644
12361 --- a/arch/x86/kernel/dumpstack.c
12362 +++ b/arch/x86/kernel/dumpstack.c
12363 @@ -2,6 +2,9 @@
12364 * Copyright (C) 1991, 1992 Linus Torvalds
12365 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12366 */
12367 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12368 +#define __INCLUDED_BY_HIDESYM 1
12369 +#endif
12370 #include <linux/kallsyms.h>
12371 #include <linux/kprobes.h>
12372 #include <linux/uaccess.h>
12373 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12374 static void
12375 print_ftrace_graph_addr(unsigned long addr, void *data,
12376 const struct stacktrace_ops *ops,
12377 - struct thread_info *tinfo, int *graph)
12378 + struct task_struct *task, int *graph)
12379 {
12380 - struct task_struct *task = tinfo->task;
12381 unsigned long ret_addr;
12382 int index = task->curr_ret_stack;
12383
12384 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12385 static inline void
12386 print_ftrace_graph_addr(unsigned long addr, void *data,
12387 const struct stacktrace_ops *ops,
12388 - struct thread_info *tinfo, int *graph)
12389 + struct task_struct *task, int *graph)
12390 { }
12391 #endif
12392
12393 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12394 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12395 */
12396
12397 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12398 - void *p, unsigned int size, void *end)
12399 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12400 {
12401 - void *t = tinfo;
12402 if (end) {
12403 if (p < end && p >= (end-THREAD_SIZE))
12404 return 1;
12405 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12406 }
12407
12408 unsigned long
12409 -print_context_stack(struct thread_info *tinfo,
12410 +print_context_stack(struct task_struct *task, void *stack_start,
12411 unsigned long *stack, unsigned long bp,
12412 const struct stacktrace_ops *ops, void *data,
12413 unsigned long *end, int *graph)
12414 {
12415 struct stack_frame *frame = (struct stack_frame *)bp;
12416
12417 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12418 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12419 unsigned long addr;
12420
12421 addr = *stack;
12422 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12423 } else {
12424 ops->address(data, addr, 0);
12425 }
12426 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12427 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12428 }
12429 stack++;
12430 }
12431 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12432 EXPORT_SYMBOL_GPL(print_context_stack);
12433
12434 unsigned long
12435 -print_context_stack_bp(struct thread_info *tinfo,
12436 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12437 unsigned long *stack, unsigned long bp,
12438 const struct stacktrace_ops *ops, void *data,
12439 unsigned long *end, int *graph)
12440 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12441 struct stack_frame *frame = (struct stack_frame *)bp;
12442 unsigned long *ret_addr = &frame->return_address;
12443
12444 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12445 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12446 unsigned long addr = *ret_addr;
12447
12448 if (!__kernel_text_address(addr))
12449 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12450 ops->address(data, addr, 1);
12451 frame = frame->next_frame;
12452 ret_addr = &frame->return_address;
12453 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12454 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12455 }
12456
12457 return (unsigned long)frame;
12458 @@ -186,7 +186,7 @@ void dump_stack(void)
12459
12460 bp = stack_frame(current, NULL);
12461 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12462 - current->pid, current->comm, print_tainted(),
12463 + task_pid_nr(current), current->comm, print_tainted(),
12464 init_utsname()->release,
12465 (int)strcspn(init_utsname()->version, " "),
12466 init_utsname()->version);
12467 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12468 }
12469 EXPORT_SYMBOL_GPL(oops_begin);
12470
12471 +extern void gr_handle_kernel_exploit(void);
12472 +
12473 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12474 {
12475 if (regs && kexec_should_crash(current))
12476 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12477 panic("Fatal exception in interrupt");
12478 if (panic_on_oops)
12479 panic("Fatal exception");
12480 - do_exit(signr);
12481 +
12482 + gr_handle_kernel_exploit();
12483 +
12484 + do_group_exit(signr);
12485 }
12486
12487 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12488 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12489
12490 show_registers(regs);
12491 #ifdef CONFIG_X86_32
12492 - if (user_mode_vm(regs)) {
12493 + if (user_mode(regs)) {
12494 sp = regs->sp;
12495 ss = regs->ss & 0xffff;
12496 } else {
12497 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12498 unsigned long flags = oops_begin();
12499 int sig = SIGSEGV;
12500
12501 - if (!user_mode_vm(regs))
12502 + if (!user_mode(regs))
12503 report_bug(regs->ip, regs);
12504
12505 if (__die(str, regs, err))
12506 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12507 index c99f9ed..2a15d80 100644
12508 --- a/arch/x86/kernel/dumpstack_32.c
12509 +++ b/arch/x86/kernel/dumpstack_32.c
12510 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12511 bp = stack_frame(task, regs);
12512
12513 for (;;) {
12514 - struct thread_info *context;
12515 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12516
12517 - context = (struct thread_info *)
12518 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12519 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12520 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12521
12522 - stack = (unsigned long *)context->previous_esp;
12523 - if (!stack)
12524 + if (stack_start == task_stack_page(task))
12525 break;
12526 + stack = *(unsigned long **)stack_start;
12527 if (ops->stack(data, "IRQ") < 0)
12528 break;
12529 touch_nmi_watchdog();
12530 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12531 * When in-kernel, we also print out the stack and code at the
12532 * time of the fault..
12533 */
12534 - if (!user_mode_vm(regs)) {
12535 + if (!user_mode(regs)) {
12536 unsigned int code_prologue = code_bytes * 43 / 64;
12537 unsigned int code_len = code_bytes;
12538 unsigned char c;
12539 u8 *ip;
12540 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12541
12542 printk(KERN_EMERG "Stack:\n");
12543 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12544
12545 printk(KERN_EMERG "Code: ");
12546
12547 - ip = (u8 *)regs->ip - code_prologue;
12548 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12549 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12550 /* try starting at IP */
12551 - ip = (u8 *)regs->ip;
12552 + ip = (u8 *)regs->ip + cs_base;
12553 code_len = code_len - code_prologue + 1;
12554 }
12555 for (i = 0; i < code_len; i++, ip++) {
12556 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12557 printk(KERN_CONT " Bad EIP value.");
12558 break;
12559 }
12560 - if (ip == (u8 *)regs->ip)
12561 + if (ip == (u8 *)regs->ip + cs_base)
12562 printk(KERN_CONT "<%02x> ", c);
12563 else
12564 printk(KERN_CONT "%02x ", c);
12565 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12566 {
12567 unsigned short ud2;
12568
12569 + ip = ktla_ktva(ip);
12570 if (ip < PAGE_OFFSET)
12571 return 0;
12572 if (probe_kernel_address((unsigned short *)ip, ud2))
12573 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12574
12575 return ud2 == 0x0b0f;
12576 }
12577 +
12578 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12579 +void pax_check_alloca(unsigned long size)
12580 +{
12581 + unsigned long sp = (unsigned long)&sp, stack_left;
12582 +
12583 + /* all kernel stacks are of the same size */
12584 + stack_left = sp & (THREAD_SIZE - 1);
12585 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12586 +}
12587 +EXPORT_SYMBOL(pax_check_alloca);
12588 +#endif
12589 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12590 index 6d728d9..279514e 100644
12591 --- a/arch/x86/kernel/dumpstack_64.c
12592 +++ b/arch/x86/kernel/dumpstack_64.c
12593 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12594 unsigned long *irq_stack_end =
12595 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12596 unsigned used = 0;
12597 - struct thread_info *tinfo;
12598 int graph = 0;
12599 unsigned long dummy;
12600 + void *stack_start;
12601
12602 if (!task)
12603 task = current;
12604 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12605 * current stack address. If the stacks consist of nested
12606 * exceptions
12607 */
12608 - tinfo = task_thread_info(task);
12609 for (;;) {
12610 char *id;
12611 unsigned long *estack_end;
12612 +
12613 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12614 &used, &id);
12615
12616 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12617 if (ops->stack(data, id) < 0)
12618 break;
12619
12620 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12621 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12622 data, estack_end, &graph);
12623 ops->stack(data, "<EOE>");
12624 /*
12625 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12626 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12627 if (ops->stack(data, "IRQ") < 0)
12628 break;
12629 - bp = ops->walk_stack(tinfo, stack, bp,
12630 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12631 ops, data, irq_stack_end, &graph);
12632 /*
12633 * We link to the next stack (which would be
12634 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12635 /*
12636 * This handles the process stack:
12637 */
12638 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12639 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12640 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12641 put_cpu();
12642 }
12643 EXPORT_SYMBOL(dump_trace);
12644 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12645
12646 return ud2 == 0x0b0f;
12647 }
12648 +
12649 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12650 +void pax_check_alloca(unsigned long size)
12651 +{
12652 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12653 + unsigned cpu, used;
12654 + char *id;
12655 +
12656 + /* check the process stack first */
12657 + stack_start = (unsigned long)task_stack_page(current);
12658 + stack_end = stack_start + THREAD_SIZE;
12659 + if (likely(stack_start <= sp && sp < stack_end)) {
12660 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12661 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12662 + return;
12663 + }
12664 +
12665 + cpu = get_cpu();
12666 +
12667 + /* check the irq stacks */
12668 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12669 + stack_start = stack_end - IRQ_STACK_SIZE;
12670 + if (stack_start <= sp && sp < stack_end) {
12671 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12672 + put_cpu();
12673 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12674 + return;
12675 + }
12676 +
12677 + /* check the exception stacks */
12678 + used = 0;
12679 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12680 + stack_start = stack_end - EXCEPTION_STKSZ;
12681 + if (stack_end && stack_start <= sp && sp < stack_end) {
12682 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12683 + put_cpu();
12684 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12685 + return;
12686 + }
12687 +
12688 + put_cpu();
12689 +
12690 + /* unknown stack */
12691 + BUG();
12692 +}
12693 +EXPORT_SYMBOL(pax_check_alloca);
12694 +#endif
12695 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12696 index cd28a35..c72ed9a 100644
12697 --- a/arch/x86/kernel/early_printk.c
12698 +++ b/arch/x86/kernel/early_printk.c
12699 @@ -7,6 +7,7 @@
12700 #include <linux/pci_regs.h>
12701 #include <linux/pci_ids.h>
12702 #include <linux/errno.h>
12703 +#include <linux/sched.h>
12704 #include <asm/io.h>
12705 #include <asm/processor.h>
12706 #include <asm/fcntl.h>
12707 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12708 index f3f6f53..0841b66 100644
12709 --- a/arch/x86/kernel/entry_32.S
12710 +++ b/arch/x86/kernel/entry_32.S
12711 @@ -186,13 +186,146 @@
12712 /*CFI_REL_OFFSET gs, PT_GS*/
12713 .endm
12714 .macro SET_KERNEL_GS reg
12715 +
12716 +#ifdef CONFIG_CC_STACKPROTECTOR
12717 movl $(__KERNEL_STACK_CANARY), \reg
12718 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12719 + movl $(__USER_DS), \reg
12720 +#else
12721 + xorl \reg, \reg
12722 +#endif
12723 +
12724 movl \reg, %gs
12725 .endm
12726
12727 #endif /* CONFIG_X86_32_LAZY_GS */
12728
12729 -.macro SAVE_ALL
12730 +.macro pax_enter_kernel
12731 +#ifdef CONFIG_PAX_KERNEXEC
12732 + call pax_enter_kernel
12733 +#endif
12734 +.endm
12735 +
12736 +.macro pax_exit_kernel
12737 +#ifdef CONFIG_PAX_KERNEXEC
12738 + call pax_exit_kernel
12739 +#endif
12740 +.endm
12741 +
12742 +#ifdef CONFIG_PAX_KERNEXEC
12743 +ENTRY(pax_enter_kernel)
12744 +#ifdef CONFIG_PARAVIRT
12745 + pushl %eax
12746 + pushl %ecx
12747 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12748 + mov %eax, %esi
12749 +#else
12750 + mov %cr0, %esi
12751 +#endif
12752 + bts $16, %esi
12753 + jnc 1f
12754 + mov %cs, %esi
12755 + cmp $__KERNEL_CS, %esi
12756 + jz 3f
12757 + ljmp $__KERNEL_CS, $3f
12758 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12759 +2:
12760 +#ifdef CONFIG_PARAVIRT
12761 + mov %esi, %eax
12762 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12763 +#else
12764 + mov %esi, %cr0
12765 +#endif
12766 +3:
12767 +#ifdef CONFIG_PARAVIRT
12768 + popl %ecx
12769 + popl %eax
12770 +#endif
12771 + ret
12772 +ENDPROC(pax_enter_kernel)
12773 +
12774 +ENTRY(pax_exit_kernel)
12775 +#ifdef CONFIG_PARAVIRT
12776 + pushl %eax
12777 + pushl %ecx
12778 +#endif
12779 + mov %cs, %esi
12780 + cmp $__KERNEXEC_KERNEL_CS, %esi
12781 + jnz 2f
12782 +#ifdef CONFIG_PARAVIRT
12783 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12784 + mov %eax, %esi
12785 +#else
12786 + mov %cr0, %esi
12787 +#endif
12788 + btr $16, %esi
12789 + ljmp $__KERNEL_CS, $1f
12790 +1:
12791 +#ifdef CONFIG_PARAVIRT
12792 + mov %esi, %eax
12793 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12794 +#else
12795 + mov %esi, %cr0
12796 +#endif
12797 +2:
12798 +#ifdef CONFIG_PARAVIRT
12799 + popl %ecx
12800 + popl %eax
12801 +#endif
12802 + ret
12803 +ENDPROC(pax_exit_kernel)
12804 +#endif
12805 +
12806 +.macro pax_erase_kstack
12807 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12808 + call pax_erase_kstack
12809 +#endif
12810 +.endm
12811 +
12812 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12813 +/*
12814 + * ebp: thread_info
12815 + * ecx, edx: can be clobbered
12816 + */
12817 +ENTRY(pax_erase_kstack)
12818 + pushl %edi
12819 + pushl %eax
12820 +
12821 + mov TI_lowest_stack(%ebp), %edi
12822 + mov $-0xBEEF, %eax
12823 + std
12824 +
12825 +1: mov %edi, %ecx
12826 + and $THREAD_SIZE_asm - 1, %ecx
12827 + shr $2, %ecx
12828 + repne scasl
12829 + jecxz 2f
12830 +
12831 + cmp $2*16, %ecx
12832 + jc 2f
12833 +
12834 + mov $2*16, %ecx
12835 + repe scasl
12836 + jecxz 2f
12837 + jne 1b
12838 +
12839 +2: cld
12840 + mov %esp, %ecx
12841 + sub %edi, %ecx
12842 + shr $2, %ecx
12843 + rep stosl
12844 +
12845 + mov TI_task_thread_sp0(%ebp), %edi
12846 + sub $128, %edi
12847 + mov %edi, TI_lowest_stack(%ebp)
12848 +
12849 + popl %eax
12850 + popl %edi
12851 + ret
12852 +ENDPROC(pax_erase_kstack)
12853 +#endif
12854 +
12855 +.macro __SAVE_ALL _DS
12856 cld
12857 PUSH_GS
12858 pushl_cfi %fs
12859 @@ -215,7 +348,7 @@
12860 CFI_REL_OFFSET ecx, 0
12861 pushl_cfi %ebx
12862 CFI_REL_OFFSET ebx, 0
12863 - movl $(__USER_DS), %edx
12864 + movl $\_DS, %edx
12865 movl %edx, %ds
12866 movl %edx, %es
12867 movl $(__KERNEL_PERCPU), %edx
12868 @@ -223,6 +356,15 @@
12869 SET_KERNEL_GS %edx
12870 .endm
12871
12872 +.macro SAVE_ALL
12873 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12874 + __SAVE_ALL __KERNEL_DS
12875 + pax_enter_kernel
12876 +#else
12877 + __SAVE_ALL __USER_DS
12878 +#endif
12879 +.endm
12880 +
12881 .macro RESTORE_INT_REGS
12882 popl_cfi %ebx
12883 CFI_RESTORE ebx
12884 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12885 popfl_cfi
12886 jmp syscall_exit
12887 CFI_ENDPROC
12888 -END(ret_from_fork)
12889 +ENDPROC(ret_from_fork)
12890
12891 /*
12892 * Interrupt exit functions should be protected against kprobes
12893 @@ -333,7 +475,15 @@ check_userspace:
12894 movb PT_CS(%esp), %al
12895 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12896 cmpl $USER_RPL, %eax
12897 +
12898 +#ifdef CONFIG_PAX_KERNEXEC
12899 + jae resume_userspace
12900 +
12901 + PAX_EXIT_KERNEL
12902 + jmp resume_kernel
12903 +#else
12904 jb resume_kernel # not returning to v8086 or userspace
12905 +#endif
12906
12907 ENTRY(resume_userspace)
12908 LOCKDEP_SYS_EXIT
12909 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12910 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12911 # int/exception return?
12912 jne work_pending
12913 - jmp restore_all
12914 -END(ret_from_exception)
12915 + jmp restore_all_pax
12916 +ENDPROC(ret_from_exception)
12917
12918 #ifdef CONFIG_PREEMPT
12919 ENTRY(resume_kernel)
12920 @@ -361,7 +511,7 @@ need_resched:
12921 jz restore_all
12922 call preempt_schedule_irq
12923 jmp need_resched
12924 -END(resume_kernel)
12925 +ENDPROC(resume_kernel)
12926 #endif
12927 CFI_ENDPROC
12928 /*
12929 @@ -395,23 +545,34 @@ sysenter_past_esp:
12930 /*CFI_REL_OFFSET cs, 0*/
12931 /*
12932 * Push current_thread_info()->sysenter_return to the stack.
12933 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12934 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12935 */
12936 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12937 + pushl_cfi $0
12938 CFI_REL_OFFSET eip, 0
12939
12940 pushl_cfi %eax
12941 SAVE_ALL
12942 + GET_THREAD_INFO(%ebp)
12943 + movl TI_sysenter_return(%ebp),%ebp
12944 + movl %ebp,PT_EIP(%esp)
12945 ENABLE_INTERRUPTS(CLBR_NONE)
12946
12947 /*
12948 * Load the potential sixth argument from user stack.
12949 * Careful about security.
12950 */
12951 + movl PT_OLDESP(%esp),%ebp
12952 +
12953 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12954 + mov PT_OLDSS(%esp),%ds
12955 +1: movl %ds:(%ebp),%ebp
12956 + push %ss
12957 + pop %ds
12958 +#else
12959 cmpl $__PAGE_OFFSET-3,%ebp
12960 jae syscall_fault
12961 1: movl (%ebp),%ebp
12962 +#endif
12963 +
12964 movl %ebp,PT_EBP(%esp)
12965 .section __ex_table,"a"
12966 .align 4
12967 @@ -434,12 +595,24 @@ sysenter_do_call:
12968 testl $_TIF_ALLWORK_MASK, %ecx
12969 jne sysexit_audit
12970 sysenter_exit:
12971 +
12972 +#ifdef CONFIG_PAX_RANDKSTACK
12973 + pushl_cfi %eax
12974 + movl %esp, %eax
12975 + call pax_randomize_kstack
12976 + popl_cfi %eax
12977 +#endif
12978 +
12979 + pax_erase_kstack
12980 +
12981 /* if something modifies registers it must also disable sysexit */
12982 movl PT_EIP(%esp), %edx
12983 movl PT_OLDESP(%esp), %ecx
12984 xorl %ebp,%ebp
12985 TRACE_IRQS_ON
12986 1: mov PT_FS(%esp), %fs
12987 +2: mov PT_DS(%esp), %ds
12988 +3: mov PT_ES(%esp), %es
12989 PTGS_TO_GS
12990 ENABLE_INTERRUPTS_SYSEXIT
12991
12992 @@ -456,6 +629,9 @@ sysenter_audit:
12993 movl %eax,%edx /* 2nd arg: syscall number */
12994 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12995 call audit_syscall_entry
12996 +
12997 + pax_erase_kstack
12998 +
12999 pushl_cfi %ebx
13000 movl PT_EAX(%esp),%eax /* reload syscall number */
13001 jmp sysenter_do_call
13002 @@ -482,11 +658,17 @@ sysexit_audit:
13003
13004 CFI_ENDPROC
13005 .pushsection .fixup,"ax"
13006 -2: movl $0,PT_FS(%esp)
13007 +4: movl $0,PT_FS(%esp)
13008 + jmp 1b
13009 +5: movl $0,PT_DS(%esp)
13010 + jmp 1b
13011 +6: movl $0,PT_ES(%esp)
13012 jmp 1b
13013 .section __ex_table,"a"
13014 .align 4
13015 - .long 1b,2b
13016 + .long 1b,4b
13017 + .long 2b,5b
13018 + .long 3b,6b
13019 .popsection
13020 PTGS_TO_GS_EX
13021 ENDPROC(ia32_sysenter_target)
13022 @@ -519,6 +701,15 @@ syscall_exit:
13023 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13024 jne syscall_exit_work
13025
13026 +restore_all_pax:
13027 +
13028 +#ifdef CONFIG_PAX_RANDKSTACK
13029 + movl %esp, %eax
13030 + call pax_randomize_kstack
13031 +#endif
13032 +
13033 + pax_erase_kstack
13034 +
13035 restore_all:
13036 TRACE_IRQS_IRET
13037 restore_all_notrace:
13038 @@ -578,14 +769,34 @@ ldt_ss:
13039 * compensating for the offset by changing to the ESPFIX segment with
13040 * a base address that matches for the difference.
13041 */
13042 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13043 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13044 mov %esp, %edx /* load kernel esp */
13045 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13046 mov %dx, %ax /* eax: new kernel esp */
13047 sub %eax, %edx /* offset (low word is 0) */
13048 +#ifdef CONFIG_SMP
13049 + movl PER_CPU_VAR(cpu_number), %ebx
13050 + shll $PAGE_SHIFT_asm, %ebx
13051 + addl $cpu_gdt_table, %ebx
13052 +#else
13053 + movl $cpu_gdt_table, %ebx
13054 +#endif
13055 shr $16, %edx
13056 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13057 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13058 +
13059 +#ifdef CONFIG_PAX_KERNEXEC
13060 + mov %cr0, %esi
13061 + btr $16, %esi
13062 + mov %esi, %cr0
13063 +#endif
13064 +
13065 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13066 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13067 +
13068 +#ifdef CONFIG_PAX_KERNEXEC
13069 + bts $16, %esi
13070 + mov %esi, %cr0
13071 +#endif
13072 +
13073 pushl_cfi $__ESPFIX_SS
13074 pushl_cfi %eax /* new kernel esp */
13075 /* Disable interrupts, but do not irqtrace this section: we
13076 @@ -614,34 +825,28 @@ work_resched:
13077 movl TI_flags(%ebp), %ecx
13078 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13079 # than syscall tracing?
13080 - jz restore_all
13081 + jz restore_all_pax
13082 testb $_TIF_NEED_RESCHED, %cl
13083 jnz work_resched
13084
13085 work_notifysig: # deal with pending signals and
13086 # notify-resume requests
13087 + movl %esp, %eax
13088 #ifdef CONFIG_VM86
13089 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13090 - movl %esp, %eax
13091 - jne work_notifysig_v86 # returning to kernel-space or
13092 + jz 1f # returning to kernel-space or
13093 # vm86-space
13094 - xorl %edx, %edx
13095 - call do_notify_resume
13096 - jmp resume_userspace_sig
13097
13098 - ALIGN
13099 -work_notifysig_v86:
13100 pushl_cfi %ecx # save ti_flags for do_notify_resume
13101 call save_v86_state # %eax contains pt_regs pointer
13102 popl_cfi %ecx
13103 movl %eax, %esp
13104 -#else
13105 - movl %esp, %eax
13106 +1:
13107 #endif
13108 xorl %edx, %edx
13109 call do_notify_resume
13110 jmp resume_userspace_sig
13111 -END(work_pending)
13112 +ENDPROC(work_pending)
13113
13114 # perform syscall exit tracing
13115 ALIGN
13116 @@ -649,11 +854,14 @@ syscall_trace_entry:
13117 movl $-ENOSYS,PT_EAX(%esp)
13118 movl %esp, %eax
13119 call syscall_trace_enter
13120 +
13121 + pax_erase_kstack
13122 +
13123 /* What it returned is what we'll actually use. */
13124 cmpl $(nr_syscalls), %eax
13125 jnae syscall_call
13126 jmp syscall_exit
13127 -END(syscall_trace_entry)
13128 +ENDPROC(syscall_trace_entry)
13129
13130 # perform syscall exit tracing
13131 ALIGN
13132 @@ -666,20 +874,24 @@ syscall_exit_work:
13133 movl %esp, %eax
13134 call syscall_trace_leave
13135 jmp resume_userspace
13136 -END(syscall_exit_work)
13137 +ENDPROC(syscall_exit_work)
13138 CFI_ENDPROC
13139
13140 RING0_INT_FRAME # can't unwind into user space anyway
13141 syscall_fault:
13142 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13143 + push %ss
13144 + pop %ds
13145 +#endif
13146 GET_THREAD_INFO(%ebp)
13147 movl $-EFAULT,PT_EAX(%esp)
13148 jmp resume_userspace
13149 -END(syscall_fault)
13150 +ENDPROC(syscall_fault)
13151
13152 syscall_badsys:
13153 movl $-ENOSYS,PT_EAX(%esp)
13154 jmp resume_userspace
13155 -END(syscall_badsys)
13156 +ENDPROC(syscall_badsys)
13157 CFI_ENDPROC
13158 /*
13159 * End of kprobes section
13160 @@ -753,6 +965,36 @@ ptregs_clone:
13161 CFI_ENDPROC
13162 ENDPROC(ptregs_clone)
13163
13164 + ALIGN;
13165 +ENTRY(kernel_execve)
13166 + CFI_STARTPROC
13167 + pushl_cfi %ebp
13168 + sub $PT_OLDSS+4,%esp
13169 + pushl_cfi %edi
13170 + pushl_cfi %ecx
13171 + pushl_cfi %eax
13172 + lea 3*4(%esp),%edi
13173 + mov $PT_OLDSS/4+1,%ecx
13174 + xorl %eax,%eax
13175 + rep stosl
13176 + popl_cfi %eax
13177 + popl_cfi %ecx
13178 + popl_cfi %edi
13179 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13180 + pushl_cfi %esp
13181 + call sys_execve
13182 + add $4,%esp
13183 + CFI_ADJUST_CFA_OFFSET -4
13184 + GET_THREAD_INFO(%ebp)
13185 + test %eax,%eax
13186 + jz syscall_exit
13187 + add $PT_OLDSS+4,%esp
13188 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13189 + popl_cfi %ebp
13190 + ret
13191 + CFI_ENDPROC
13192 +ENDPROC(kernel_execve)
13193 +
13194 .macro FIXUP_ESPFIX_STACK
13195 /*
13196 * Switch back for ESPFIX stack to the normal zerobased stack
13197 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13198 * normal stack and adjusts ESP with the matching offset.
13199 */
13200 /* fixup the stack */
13201 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13202 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13203 +#ifdef CONFIG_SMP
13204 + movl PER_CPU_VAR(cpu_number), %ebx
13205 + shll $PAGE_SHIFT_asm, %ebx
13206 + addl $cpu_gdt_table, %ebx
13207 +#else
13208 + movl $cpu_gdt_table, %ebx
13209 +#endif
13210 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13211 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13212 shl $16, %eax
13213 addl %esp, %eax /* the adjusted stack pointer */
13214 pushl_cfi $__KERNEL_DS
13215 @@ -816,7 +1065,7 @@ vector=vector+1
13216 .endr
13217 2: jmp common_interrupt
13218 .endr
13219 -END(irq_entries_start)
13220 +ENDPROC(irq_entries_start)
13221
13222 .previous
13223 END(interrupt)
13224 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13225 pushl_cfi $do_coprocessor_error
13226 jmp error_code
13227 CFI_ENDPROC
13228 -END(coprocessor_error)
13229 +ENDPROC(coprocessor_error)
13230
13231 ENTRY(simd_coprocessor_error)
13232 RING0_INT_FRAME
13233 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13234 #endif
13235 jmp error_code
13236 CFI_ENDPROC
13237 -END(simd_coprocessor_error)
13238 +ENDPROC(simd_coprocessor_error)
13239
13240 ENTRY(device_not_available)
13241 RING0_INT_FRAME
13242 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13243 pushl_cfi $do_device_not_available
13244 jmp error_code
13245 CFI_ENDPROC
13246 -END(device_not_available)
13247 +ENDPROC(device_not_available)
13248
13249 #ifdef CONFIG_PARAVIRT
13250 ENTRY(native_iret)
13251 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13252 .align 4
13253 .long native_iret, iret_exc
13254 .previous
13255 -END(native_iret)
13256 +ENDPROC(native_iret)
13257
13258 ENTRY(native_irq_enable_sysexit)
13259 sti
13260 sysexit
13261 -END(native_irq_enable_sysexit)
13262 +ENDPROC(native_irq_enable_sysexit)
13263 #endif
13264
13265 ENTRY(overflow)
13266 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13267 pushl_cfi $do_overflow
13268 jmp error_code
13269 CFI_ENDPROC
13270 -END(overflow)
13271 +ENDPROC(overflow)
13272
13273 ENTRY(bounds)
13274 RING0_INT_FRAME
13275 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13276 pushl_cfi $do_bounds
13277 jmp error_code
13278 CFI_ENDPROC
13279 -END(bounds)
13280 +ENDPROC(bounds)
13281
13282 ENTRY(invalid_op)
13283 RING0_INT_FRAME
13284 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13285 pushl_cfi $do_invalid_op
13286 jmp error_code
13287 CFI_ENDPROC
13288 -END(invalid_op)
13289 +ENDPROC(invalid_op)
13290
13291 ENTRY(coprocessor_segment_overrun)
13292 RING0_INT_FRAME
13293 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13294 pushl_cfi $do_coprocessor_segment_overrun
13295 jmp error_code
13296 CFI_ENDPROC
13297 -END(coprocessor_segment_overrun)
13298 +ENDPROC(coprocessor_segment_overrun)
13299
13300 ENTRY(invalid_TSS)
13301 RING0_EC_FRAME
13302 pushl_cfi $do_invalid_TSS
13303 jmp error_code
13304 CFI_ENDPROC
13305 -END(invalid_TSS)
13306 +ENDPROC(invalid_TSS)
13307
13308 ENTRY(segment_not_present)
13309 RING0_EC_FRAME
13310 pushl_cfi $do_segment_not_present
13311 jmp error_code
13312 CFI_ENDPROC
13313 -END(segment_not_present)
13314 +ENDPROC(segment_not_present)
13315
13316 ENTRY(stack_segment)
13317 RING0_EC_FRAME
13318 pushl_cfi $do_stack_segment
13319 jmp error_code
13320 CFI_ENDPROC
13321 -END(stack_segment)
13322 +ENDPROC(stack_segment)
13323
13324 ENTRY(alignment_check)
13325 RING0_EC_FRAME
13326 pushl_cfi $do_alignment_check
13327 jmp error_code
13328 CFI_ENDPROC
13329 -END(alignment_check)
13330 +ENDPROC(alignment_check)
13331
13332 ENTRY(divide_error)
13333 RING0_INT_FRAME
13334 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13335 pushl_cfi $do_divide_error
13336 jmp error_code
13337 CFI_ENDPROC
13338 -END(divide_error)
13339 +ENDPROC(divide_error)
13340
13341 #ifdef CONFIG_X86_MCE
13342 ENTRY(machine_check)
13343 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13344 pushl_cfi machine_check_vector
13345 jmp error_code
13346 CFI_ENDPROC
13347 -END(machine_check)
13348 +ENDPROC(machine_check)
13349 #endif
13350
13351 ENTRY(spurious_interrupt_bug)
13352 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13353 pushl_cfi $do_spurious_interrupt_bug
13354 jmp error_code
13355 CFI_ENDPROC
13356 -END(spurious_interrupt_bug)
13357 +ENDPROC(spurious_interrupt_bug)
13358 /*
13359 * End of kprobes section
13360 */
13361 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13362
13363 ENTRY(mcount)
13364 ret
13365 -END(mcount)
13366 +ENDPROC(mcount)
13367
13368 ENTRY(ftrace_caller)
13369 cmpl $0, function_trace_stop
13370 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13371 .globl ftrace_stub
13372 ftrace_stub:
13373 ret
13374 -END(ftrace_caller)
13375 +ENDPROC(ftrace_caller)
13376
13377 #else /* ! CONFIG_DYNAMIC_FTRACE */
13378
13379 @@ -1174,7 +1423,7 @@ trace:
13380 popl %ecx
13381 popl %eax
13382 jmp ftrace_stub
13383 -END(mcount)
13384 +ENDPROC(mcount)
13385 #endif /* CONFIG_DYNAMIC_FTRACE */
13386 #endif /* CONFIG_FUNCTION_TRACER */
13387
13388 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13389 popl %ecx
13390 popl %eax
13391 ret
13392 -END(ftrace_graph_caller)
13393 +ENDPROC(ftrace_graph_caller)
13394
13395 .globl return_to_handler
13396 return_to_handler:
13397 @@ -1209,7 +1458,6 @@ return_to_handler:
13398 jmp *%ecx
13399 #endif
13400
13401 -.section .rodata,"a"
13402 #include "syscall_table_32.S"
13403
13404 syscall_table_size=(.-sys_call_table)
13405 @@ -1255,15 +1503,18 @@ error_code:
13406 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13407 REG_TO_PTGS %ecx
13408 SET_KERNEL_GS %ecx
13409 - movl $(__USER_DS), %ecx
13410 + movl $(__KERNEL_DS), %ecx
13411 movl %ecx, %ds
13412 movl %ecx, %es
13413 +
13414 + pax_enter_kernel
13415 +
13416 TRACE_IRQS_OFF
13417 movl %esp,%eax # pt_regs pointer
13418 call *%edi
13419 jmp ret_from_exception
13420 CFI_ENDPROC
13421 -END(page_fault)
13422 +ENDPROC(page_fault)
13423
13424 /*
13425 * Debug traps and NMI can happen at the one SYSENTER instruction
13426 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13427 call do_debug
13428 jmp ret_from_exception
13429 CFI_ENDPROC
13430 -END(debug)
13431 +ENDPROC(debug)
13432
13433 /*
13434 * NMI is doubly nasty. It can happen _while_ we're handling
13435 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13436 xorl %edx,%edx # zero error code
13437 movl %esp,%eax # pt_regs pointer
13438 call do_nmi
13439 +
13440 + pax_exit_kernel
13441 +
13442 jmp restore_all_notrace
13443 CFI_ENDPROC
13444
13445 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13446 FIXUP_ESPFIX_STACK # %eax == %esp
13447 xorl %edx,%edx # zero error code
13448 call do_nmi
13449 +
13450 + pax_exit_kernel
13451 +
13452 RESTORE_REGS
13453 lss 12+4(%esp), %esp # back to espfix stack
13454 CFI_ADJUST_CFA_OFFSET -24
13455 jmp irq_return
13456 CFI_ENDPROC
13457 -END(nmi)
13458 +ENDPROC(nmi)
13459
13460 ENTRY(int3)
13461 RING0_INT_FRAME
13462 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13463 call do_int3
13464 jmp ret_from_exception
13465 CFI_ENDPROC
13466 -END(int3)
13467 +ENDPROC(int3)
13468
13469 ENTRY(general_protection)
13470 RING0_EC_FRAME
13471 pushl_cfi $do_general_protection
13472 jmp error_code
13473 CFI_ENDPROC
13474 -END(general_protection)
13475 +ENDPROC(general_protection)
13476
13477 #ifdef CONFIG_KVM_GUEST
13478 ENTRY(async_page_fault)
13479 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13480 pushl_cfi $do_async_page_fault
13481 jmp error_code
13482 CFI_ENDPROC
13483 -END(async_page_fault)
13484 +ENDPROC(async_page_fault)
13485 #endif
13486
13487 /*
13488 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13489 index faf8d5e..f58c441 100644
13490 --- a/arch/x86/kernel/entry_64.S
13491 +++ b/arch/x86/kernel/entry_64.S
13492 @@ -55,6 +55,8 @@
13493 #include <asm/paravirt.h>
13494 #include <asm/ftrace.h>
13495 #include <asm/percpu.h>
13496 +#include <asm/pgtable.h>
13497 +#include <asm/alternative-asm.h>
13498
13499 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13500 #include <linux/elf-em.h>
13501 @@ -68,8 +70,9 @@
13502 #ifdef CONFIG_FUNCTION_TRACER
13503 #ifdef CONFIG_DYNAMIC_FTRACE
13504 ENTRY(mcount)
13505 + pax_force_retaddr
13506 retq
13507 -END(mcount)
13508 +ENDPROC(mcount)
13509
13510 ENTRY(ftrace_caller)
13511 cmpl $0, function_trace_stop
13512 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13513 #endif
13514
13515 GLOBAL(ftrace_stub)
13516 + pax_force_retaddr
13517 retq
13518 -END(ftrace_caller)
13519 +ENDPROC(ftrace_caller)
13520
13521 #else /* ! CONFIG_DYNAMIC_FTRACE */
13522 ENTRY(mcount)
13523 @@ -112,6 +116,7 @@ ENTRY(mcount)
13524 #endif
13525
13526 GLOBAL(ftrace_stub)
13527 + pax_force_retaddr
13528 retq
13529
13530 trace:
13531 @@ -121,12 +126,13 @@ trace:
13532 movq 8(%rbp), %rsi
13533 subq $MCOUNT_INSN_SIZE, %rdi
13534
13535 + pax_force_fptr ftrace_trace_function
13536 call *ftrace_trace_function
13537
13538 MCOUNT_RESTORE_FRAME
13539
13540 jmp ftrace_stub
13541 -END(mcount)
13542 +ENDPROC(mcount)
13543 #endif /* CONFIG_DYNAMIC_FTRACE */
13544 #endif /* CONFIG_FUNCTION_TRACER */
13545
13546 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13547
13548 MCOUNT_RESTORE_FRAME
13549
13550 + pax_force_retaddr
13551 retq
13552 -END(ftrace_graph_caller)
13553 +ENDPROC(ftrace_graph_caller)
13554
13555 GLOBAL(return_to_handler)
13556 subq $24, %rsp
13557 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13558 movq 8(%rsp), %rdx
13559 movq (%rsp), %rax
13560 addq $24, %rsp
13561 + pax_force_fptr %rdi
13562 jmp *%rdi
13563 #endif
13564
13565 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13566 ENDPROC(native_usergs_sysret64)
13567 #endif /* CONFIG_PARAVIRT */
13568
13569 + .macro ljmpq sel, off
13570 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13571 + .byte 0x48; ljmp *1234f(%rip)
13572 + .pushsection .rodata
13573 + .align 16
13574 + 1234: .quad \off; .word \sel
13575 + .popsection
13576 +#else
13577 + pushq $\sel
13578 + pushq $\off
13579 + lretq
13580 +#endif
13581 + .endm
13582 +
13583 + .macro pax_enter_kernel
13584 + pax_set_fptr_mask
13585 +#ifdef CONFIG_PAX_KERNEXEC
13586 + call pax_enter_kernel
13587 +#endif
13588 + .endm
13589 +
13590 + .macro pax_exit_kernel
13591 +#ifdef CONFIG_PAX_KERNEXEC
13592 + call pax_exit_kernel
13593 +#endif
13594 + .endm
13595 +
13596 +#ifdef CONFIG_PAX_KERNEXEC
13597 +ENTRY(pax_enter_kernel)
13598 + pushq %rdi
13599 +
13600 +#ifdef CONFIG_PARAVIRT
13601 + PV_SAVE_REGS(CLBR_RDI)
13602 +#endif
13603 +
13604 + GET_CR0_INTO_RDI
13605 + bts $16,%rdi
13606 + jnc 3f
13607 + mov %cs,%edi
13608 + cmp $__KERNEL_CS,%edi
13609 + jnz 2f
13610 +1:
13611 +
13612 +#ifdef CONFIG_PARAVIRT
13613 + PV_RESTORE_REGS(CLBR_RDI)
13614 +#endif
13615 +
13616 + popq %rdi
13617 + pax_force_retaddr
13618 + retq
13619 +
13620 +2: ljmpq __KERNEL_CS,1f
13621 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13622 +4: SET_RDI_INTO_CR0
13623 + jmp 1b
13624 +ENDPROC(pax_enter_kernel)
13625 +
13626 +ENTRY(pax_exit_kernel)
13627 + pushq %rdi
13628 +
13629 +#ifdef CONFIG_PARAVIRT
13630 + PV_SAVE_REGS(CLBR_RDI)
13631 +#endif
13632 +
13633 + mov %cs,%rdi
13634 + cmp $__KERNEXEC_KERNEL_CS,%edi
13635 + jz 2f
13636 +1:
13637 +
13638 +#ifdef CONFIG_PARAVIRT
13639 + PV_RESTORE_REGS(CLBR_RDI);
13640 +#endif
13641 +
13642 + popq %rdi
13643 + pax_force_retaddr
13644 + retq
13645 +
13646 +2: GET_CR0_INTO_RDI
13647 + btr $16,%rdi
13648 + ljmpq __KERNEL_CS,3f
13649 +3: SET_RDI_INTO_CR0
13650 + jmp 1b
13651 +#ifdef CONFIG_PARAVIRT
13652 + PV_RESTORE_REGS(CLBR_RDI);
13653 +#endif
13654 +
13655 + popq %rdi
13656 + pax_force_retaddr
13657 + retq
13658 +ENDPROC(pax_exit_kernel)
13659 +#endif
13660 +
13661 + .macro pax_enter_kernel_user
13662 + pax_set_fptr_mask
13663 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13664 + call pax_enter_kernel_user
13665 +#endif
13666 + .endm
13667 +
13668 + .macro pax_exit_kernel_user
13669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13670 + call pax_exit_kernel_user
13671 +#endif
13672 +#ifdef CONFIG_PAX_RANDKSTACK
13673 + pushq %rax
13674 + call pax_randomize_kstack
13675 + popq %rax
13676 +#endif
13677 + .endm
13678 +
13679 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13680 +ENTRY(pax_enter_kernel_user)
13681 + pushq %rdi
13682 + pushq %rbx
13683 +
13684 +#ifdef CONFIG_PARAVIRT
13685 + PV_SAVE_REGS(CLBR_RDI)
13686 +#endif
13687 +
13688 + GET_CR3_INTO_RDI
13689 + mov %rdi,%rbx
13690 + add $__START_KERNEL_map,%rbx
13691 + sub phys_base(%rip),%rbx
13692 +
13693 +#ifdef CONFIG_PARAVIRT
13694 + pushq %rdi
13695 + cmpl $0, pv_info+PARAVIRT_enabled
13696 + jz 1f
13697 + i = 0
13698 + .rept USER_PGD_PTRS
13699 + mov i*8(%rbx),%rsi
13700 + mov $0,%sil
13701 + lea i*8(%rbx),%rdi
13702 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13703 + i = i + 1
13704 + .endr
13705 + jmp 2f
13706 +1:
13707 +#endif
13708 +
13709 + i = 0
13710 + .rept USER_PGD_PTRS
13711 + movb $0,i*8(%rbx)
13712 + i = i + 1
13713 + .endr
13714 +
13715 +#ifdef CONFIG_PARAVIRT
13716 +2: popq %rdi
13717 +#endif
13718 + SET_RDI_INTO_CR3
13719 +
13720 +#ifdef CONFIG_PAX_KERNEXEC
13721 + GET_CR0_INTO_RDI
13722 + bts $16,%rdi
13723 + SET_RDI_INTO_CR0
13724 +#endif
13725 +
13726 +#ifdef CONFIG_PARAVIRT
13727 + PV_RESTORE_REGS(CLBR_RDI)
13728 +#endif
13729 +
13730 + popq %rbx
13731 + popq %rdi
13732 + pax_force_retaddr
13733 + retq
13734 +ENDPROC(pax_enter_kernel_user)
13735 +
13736 +ENTRY(pax_exit_kernel_user)
13737 + push %rdi
13738 +
13739 +#ifdef CONFIG_PARAVIRT
13740 + pushq %rbx
13741 + PV_SAVE_REGS(CLBR_RDI)
13742 +#endif
13743 +
13744 +#ifdef CONFIG_PAX_KERNEXEC
13745 + GET_CR0_INTO_RDI
13746 + btr $16,%rdi
13747 + SET_RDI_INTO_CR0
13748 +#endif
13749 +
13750 + GET_CR3_INTO_RDI
13751 + add $__START_KERNEL_map,%rdi
13752 + sub phys_base(%rip),%rdi
13753 +
13754 +#ifdef CONFIG_PARAVIRT
13755 + cmpl $0, pv_info+PARAVIRT_enabled
13756 + jz 1f
13757 + mov %rdi,%rbx
13758 + i = 0
13759 + .rept USER_PGD_PTRS
13760 + mov i*8(%rbx),%rsi
13761 + mov $0x67,%sil
13762 + lea i*8(%rbx),%rdi
13763 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13764 + i = i + 1
13765 + .endr
13766 + jmp 2f
13767 +1:
13768 +#endif
13769 +
13770 + i = 0
13771 + .rept USER_PGD_PTRS
13772 + movb $0x67,i*8(%rdi)
13773 + i = i + 1
13774 + .endr
13775 +
13776 +#ifdef CONFIG_PARAVIRT
13777 +2: PV_RESTORE_REGS(CLBR_RDI)
13778 + popq %rbx
13779 +#endif
13780 +
13781 + popq %rdi
13782 + pax_force_retaddr
13783 + retq
13784 +ENDPROC(pax_exit_kernel_user)
13785 +#endif
13786 +
13787 +.macro pax_erase_kstack
13788 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13789 + call pax_erase_kstack
13790 +#endif
13791 +.endm
13792 +
13793 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13794 +/*
13795 + * r11: thread_info
13796 + * rcx, rdx: can be clobbered
13797 + */
13798 +ENTRY(pax_erase_kstack)
13799 + pushq %rdi
13800 + pushq %rax
13801 + pushq %r11
13802 +
13803 + GET_THREAD_INFO(%r11)
13804 + mov TI_lowest_stack(%r11), %rdi
13805 + mov $-0xBEEF, %rax
13806 + std
13807 +
13808 +1: mov %edi, %ecx
13809 + and $THREAD_SIZE_asm - 1, %ecx
13810 + shr $3, %ecx
13811 + repne scasq
13812 + jecxz 2f
13813 +
13814 + cmp $2*8, %ecx
13815 + jc 2f
13816 +
13817 + mov $2*8, %ecx
13818 + repe scasq
13819 + jecxz 2f
13820 + jne 1b
13821 +
13822 +2: cld
13823 + mov %esp, %ecx
13824 + sub %edi, %ecx
13825 +
13826 + cmp $THREAD_SIZE_asm, %rcx
13827 + jb 3f
13828 + ud2
13829 +3:
13830 +
13831 + shr $3, %ecx
13832 + rep stosq
13833 +
13834 + mov TI_task_thread_sp0(%r11), %rdi
13835 + sub $256, %rdi
13836 + mov %rdi, TI_lowest_stack(%r11)
13837 +
13838 + popq %r11
13839 + popq %rax
13840 + popq %rdi
13841 + pax_force_retaddr
13842 + ret
13843 +ENDPROC(pax_erase_kstack)
13844 +#endif
13845
13846 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13847 #ifdef CONFIG_TRACE_IRQFLAGS
13848 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13849 .endm
13850
13851 .macro UNFAKE_STACK_FRAME
13852 - addq $8*6, %rsp
13853 - CFI_ADJUST_CFA_OFFSET -(6*8)
13854 + addq $8*6 + ARG_SKIP, %rsp
13855 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13856 .endm
13857
13858 /*
13859 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13860 movq %rsp, %rsi
13861
13862 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13863 - testl $3, CS(%rdi)
13864 + testb $3, CS(%rdi)
13865 je 1f
13866 SWAPGS
13867 /*
13868 @@ -355,9 +639,10 @@ ENTRY(save_rest)
13869 movq_cfi r15, R15+16
13870 movq %r11, 8(%rsp) /* return address */
13871 FIXUP_TOP_OF_STACK %r11, 16
13872 + pax_force_retaddr
13873 ret
13874 CFI_ENDPROC
13875 -END(save_rest)
13876 +ENDPROC(save_rest)
13877
13878 /* save complete stack frame */
13879 .pushsection .kprobes.text, "ax"
13880 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13881 js 1f /* negative -> in kernel */
13882 SWAPGS
13883 xorl %ebx,%ebx
13884 -1: ret
13885 +1: pax_force_retaddr_bts
13886 + ret
13887 CFI_ENDPROC
13888 -END(save_paranoid)
13889 +ENDPROC(save_paranoid)
13890 .popsection
13891
13892 /*
13893 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13894
13895 RESTORE_REST
13896
13897 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13898 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13899 je int_ret_from_sys_call
13900
13901 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13902 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13903 jmp ret_from_sys_call # go to the SYSRET fastpath
13904
13905 CFI_ENDPROC
13906 -END(ret_from_fork)
13907 +ENDPROC(ret_from_fork)
13908
13909 /*
13910 * System call entry. Up to 6 arguments in registers are supported.
13911 @@ -456,7 +742,7 @@ END(ret_from_fork)
13912 ENTRY(system_call)
13913 CFI_STARTPROC simple
13914 CFI_SIGNAL_FRAME
13915 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13916 + CFI_DEF_CFA rsp,0
13917 CFI_REGISTER rip,rcx
13918 /*CFI_REGISTER rflags,r11*/
13919 SWAPGS_UNSAFE_STACK
13920 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13921
13922 movq %rsp,PER_CPU_VAR(old_rsp)
13923 movq PER_CPU_VAR(kernel_stack),%rsp
13924 + SAVE_ARGS 8*6,0
13925 + pax_enter_kernel_user
13926 /*
13927 * No need to follow this irqs off/on section - it's straight
13928 * and short:
13929 */
13930 ENABLE_INTERRUPTS(CLBR_NONE)
13931 - SAVE_ARGS 8,0
13932 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13933 movq %rcx,RIP-ARGOFFSET(%rsp)
13934 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13935 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13936 system_call_fastpath:
13937 cmpq $__NR_syscall_max,%rax
13938 ja badsys
13939 - movq %r10,%rcx
13940 + movq R10-ARGOFFSET(%rsp),%rcx
13941 call *sys_call_table(,%rax,8) # XXX: rip relative
13942 movq %rax,RAX-ARGOFFSET(%rsp)
13943 /*
13944 @@ -503,6 +790,8 @@ sysret_check:
13945 andl %edi,%edx
13946 jnz sysret_careful
13947 CFI_REMEMBER_STATE
13948 + pax_exit_kernel_user
13949 + pax_erase_kstack
13950 /*
13951 * sysretq will re-enable interrupts:
13952 */
13953 @@ -554,14 +843,18 @@ badsys:
13954 * jump back to the normal fast path.
13955 */
13956 auditsys:
13957 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
13958 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13959 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13960 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13961 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13962 movq %rax,%rsi /* 2nd arg: syscall number */
13963 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13964 call audit_syscall_entry
13965 +
13966 + pax_erase_kstack
13967 +
13968 LOAD_ARGS 0 /* reload call-clobbered registers */
13969 + pax_set_fptr_mask
13970 jmp system_call_fastpath
13971
13972 /*
13973 @@ -591,16 +884,20 @@ tracesys:
13974 FIXUP_TOP_OF_STACK %rdi
13975 movq %rsp,%rdi
13976 call syscall_trace_enter
13977 +
13978 + pax_erase_kstack
13979 +
13980 /*
13981 * Reload arg registers from stack in case ptrace changed them.
13982 * We don't reload %rax because syscall_trace_enter() returned
13983 * the value it wants us to use in the table lookup.
13984 */
13985 LOAD_ARGS ARGOFFSET, 1
13986 + pax_set_fptr_mask
13987 RESTORE_REST
13988 cmpq $__NR_syscall_max,%rax
13989 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13990 - movq %r10,%rcx /* fixup for C */
13991 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13992 call *sys_call_table(,%rax,8)
13993 movq %rax,RAX-ARGOFFSET(%rsp)
13994 /* Use IRET because user could have changed frame */
13995 @@ -612,7 +909,7 @@ tracesys:
13996 GLOBAL(int_ret_from_sys_call)
13997 DISABLE_INTERRUPTS(CLBR_NONE)
13998 TRACE_IRQS_OFF
13999 - testl $3,CS-ARGOFFSET(%rsp)
14000 + testb $3,CS-ARGOFFSET(%rsp)
14001 je retint_restore_args
14002 movl $_TIF_ALLWORK_MASK,%edi
14003 /* edi: mask to check */
14004 @@ -669,7 +966,7 @@ int_restore_rest:
14005 TRACE_IRQS_OFF
14006 jmp int_with_check
14007 CFI_ENDPROC
14008 -END(system_call)
14009 +ENDPROC(system_call)
14010
14011 /*
14012 * Certain special system calls that need to save a complete full stack frame.
14013 @@ -685,7 +982,7 @@ ENTRY(\label)
14014 call \func
14015 jmp ptregscall_common
14016 CFI_ENDPROC
14017 -END(\label)
14018 +ENDPROC(\label)
14019 .endm
14020
14021 PTREGSCALL stub_clone, sys_clone, %r8
14022 @@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
14023 movq_cfi_restore R12+8, r12
14024 movq_cfi_restore RBP+8, rbp
14025 movq_cfi_restore RBX+8, rbx
14026 + pax_force_retaddr
14027 ret $REST_SKIP /* pop extended registers */
14028 CFI_ENDPROC
14029 -END(ptregscall_common)
14030 +ENDPROC(ptregscall_common)
14031
14032 ENTRY(stub_execve)
14033 CFI_STARTPROC
14034 @@ -720,7 +1018,7 @@ ENTRY(stub_execve)
14035 RESTORE_REST
14036 jmp int_ret_from_sys_call
14037 CFI_ENDPROC
14038 -END(stub_execve)
14039 +ENDPROC(stub_execve)
14040
14041 /*
14042 * sigreturn is special because it needs to restore all registers on return.
14043 @@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
14044 RESTORE_REST
14045 jmp int_ret_from_sys_call
14046 CFI_ENDPROC
14047 -END(stub_rt_sigreturn)
14048 +ENDPROC(stub_rt_sigreturn)
14049
14050 /*
14051 * Build the entry stubs and pointer table with some assembler magic.
14052 @@ -773,7 +1071,7 @@ vector=vector+1
14053 2: jmp common_interrupt
14054 .endr
14055 CFI_ENDPROC
14056 -END(irq_entries_start)
14057 +ENDPROC(irq_entries_start)
14058
14059 .previous
14060 END(interrupt)
14061 @@ -793,6 +1091,16 @@ END(interrupt)
14062 subq $ORIG_RAX-RBP, %rsp
14063 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14064 SAVE_ARGS_IRQ
14065 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14066 + testb $3, CS(%rdi)
14067 + jnz 1f
14068 + pax_enter_kernel
14069 + jmp 2f
14070 +1: pax_enter_kernel_user
14071 +2:
14072 +#else
14073 + pax_enter_kernel
14074 +#endif
14075 call \func
14076 .endm
14077
14078 @@ -824,7 +1132,7 @@ ret_from_intr:
14079
14080 exit_intr:
14081 GET_THREAD_INFO(%rcx)
14082 - testl $3,CS-ARGOFFSET(%rsp)
14083 + testb $3,CS-ARGOFFSET(%rsp)
14084 je retint_kernel
14085
14086 /* Interrupt came from user space */
14087 @@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
14088 * The iretq could re-enable interrupts:
14089 */
14090 DISABLE_INTERRUPTS(CLBR_ANY)
14091 + pax_exit_kernel_user
14092 + pax_erase_kstack
14093 TRACE_IRQS_IRETQ
14094 SWAPGS
14095 jmp restore_args
14096
14097 retint_restore_args: /* return to kernel space */
14098 DISABLE_INTERRUPTS(CLBR_ANY)
14099 + pax_exit_kernel
14100 + pax_force_retaddr RIP-ARGOFFSET
14101 /*
14102 * The iretq could re-enable interrupts:
14103 */
14104 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14105 #endif
14106
14107 CFI_ENDPROC
14108 -END(common_interrupt)
14109 +ENDPROC(common_interrupt)
14110 /*
14111 * End of kprobes section
14112 */
14113 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14114 interrupt \do_sym
14115 jmp ret_from_intr
14116 CFI_ENDPROC
14117 -END(\sym)
14118 +ENDPROC(\sym)
14119 .endm
14120
14121 #ifdef CONFIG_SMP
14122 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14123 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14124 call error_entry
14125 DEFAULT_FRAME 0
14126 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14127 + testb $3, CS(%rsp)
14128 + jnz 1f
14129 + pax_enter_kernel
14130 + jmp 2f
14131 +1: pax_enter_kernel_user
14132 +2:
14133 +#else
14134 + pax_enter_kernel
14135 +#endif
14136 movq %rsp,%rdi /* pt_regs pointer */
14137 xorl %esi,%esi /* no error code */
14138 call \do_sym
14139 jmp error_exit /* %ebx: no swapgs flag */
14140 CFI_ENDPROC
14141 -END(\sym)
14142 +ENDPROC(\sym)
14143 .endm
14144
14145 .macro paranoidzeroentry sym do_sym
14146 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14147 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14148 call save_paranoid
14149 TRACE_IRQS_OFF
14150 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14151 + testb $3, CS(%rsp)
14152 + jnz 1f
14153 + pax_enter_kernel
14154 + jmp 2f
14155 +1: pax_enter_kernel_user
14156 +2:
14157 +#else
14158 + pax_enter_kernel
14159 +#endif
14160 movq %rsp,%rdi /* pt_regs pointer */
14161 xorl %esi,%esi /* no error code */
14162 call \do_sym
14163 jmp paranoid_exit /* %ebx: no swapgs flag */
14164 CFI_ENDPROC
14165 -END(\sym)
14166 +ENDPROC(\sym)
14167 .endm
14168
14169 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14170 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14171 .macro paranoidzeroentry_ist sym do_sym ist
14172 ENTRY(\sym)
14173 INTR_FRAME
14174 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14175 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14176 call save_paranoid
14177 TRACE_IRQS_OFF
14178 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14179 + testb $3, CS(%rsp)
14180 + jnz 1f
14181 + pax_enter_kernel
14182 + jmp 2f
14183 +1: pax_enter_kernel_user
14184 +2:
14185 +#else
14186 + pax_enter_kernel
14187 +#endif
14188 movq %rsp,%rdi /* pt_regs pointer */
14189 xorl %esi,%esi /* no error code */
14190 +#ifdef CONFIG_SMP
14191 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14192 + lea init_tss(%r12), %r12
14193 +#else
14194 + lea init_tss(%rip), %r12
14195 +#endif
14196 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14197 call \do_sym
14198 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14199 jmp paranoid_exit /* %ebx: no swapgs flag */
14200 CFI_ENDPROC
14201 -END(\sym)
14202 +ENDPROC(\sym)
14203 .endm
14204
14205 .macro errorentry sym do_sym
14206 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14207 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14208 call error_entry
14209 DEFAULT_FRAME 0
14210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14211 + testb $3, CS(%rsp)
14212 + jnz 1f
14213 + pax_enter_kernel
14214 + jmp 2f
14215 +1: pax_enter_kernel_user
14216 +2:
14217 +#else
14218 + pax_enter_kernel
14219 +#endif
14220 movq %rsp,%rdi /* pt_regs pointer */
14221 movq ORIG_RAX(%rsp),%rsi /* get error code */
14222 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14223 call \do_sym
14224 jmp error_exit /* %ebx: no swapgs flag */
14225 CFI_ENDPROC
14226 -END(\sym)
14227 +ENDPROC(\sym)
14228 .endm
14229
14230 /* error code is on the stack already */
14231 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14232 call save_paranoid
14233 DEFAULT_FRAME 0
14234 TRACE_IRQS_OFF
14235 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14236 + testb $3, CS(%rsp)
14237 + jnz 1f
14238 + pax_enter_kernel
14239 + jmp 2f
14240 +1: pax_enter_kernel_user
14241 +2:
14242 +#else
14243 + pax_enter_kernel
14244 +#endif
14245 movq %rsp,%rdi /* pt_regs pointer */
14246 movq ORIG_RAX(%rsp),%rsi /* get error code */
14247 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14248 call \do_sym
14249 jmp paranoid_exit /* %ebx: no swapgs flag */
14250 CFI_ENDPROC
14251 -END(\sym)
14252 +ENDPROC(\sym)
14253 .endm
14254
14255 zeroentry divide_error do_divide_error
14256 @@ -1129,9 +1497,10 @@ gs_change:
14257 2: mfence /* workaround */
14258 SWAPGS
14259 popfq_cfi
14260 + pax_force_retaddr
14261 ret
14262 CFI_ENDPROC
14263 -END(native_load_gs_index)
14264 +ENDPROC(native_load_gs_index)
14265
14266 .section __ex_table,"a"
14267 .align 8
14268 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14269 * Here we are in the child and the registers are set as they were
14270 * at kernel_thread() invocation in the parent.
14271 */
14272 + pax_force_fptr %rsi
14273 call *%rsi
14274 # exit
14275 mov %eax, %edi
14276 call do_exit
14277 ud2 # padding for call trace
14278 CFI_ENDPROC
14279 -END(kernel_thread_helper)
14280 +ENDPROC(kernel_thread_helper)
14281
14282 /*
14283 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14284 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14285 RESTORE_REST
14286 testq %rax,%rax
14287 je int_ret_from_sys_call
14288 - RESTORE_ARGS
14289 UNFAKE_STACK_FRAME
14290 + pax_force_retaddr
14291 ret
14292 CFI_ENDPROC
14293 -END(kernel_execve)
14294 +ENDPROC(kernel_execve)
14295
14296 /* Call softirq on interrupt stack. Interrupts are off. */
14297 ENTRY(call_softirq)
14298 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14299 CFI_DEF_CFA_REGISTER rsp
14300 CFI_ADJUST_CFA_OFFSET -8
14301 decl PER_CPU_VAR(irq_count)
14302 + pax_force_retaddr
14303 ret
14304 CFI_ENDPROC
14305 -END(call_softirq)
14306 +ENDPROC(call_softirq)
14307
14308 #ifdef CONFIG_XEN
14309 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14310 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14311 decl PER_CPU_VAR(irq_count)
14312 jmp error_exit
14313 CFI_ENDPROC
14314 -END(xen_do_hypervisor_callback)
14315 +ENDPROC(xen_do_hypervisor_callback)
14316
14317 /*
14318 * Hypervisor uses this for application faults while it executes.
14319 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14320 SAVE_ALL
14321 jmp error_exit
14322 CFI_ENDPROC
14323 -END(xen_failsafe_callback)
14324 +ENDPROC(xen_failsafe_callback)
14325
14326 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14327 xen_hvm_callback_vector xen_evtchn_do_upcall
14328 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14329 TRACE_IRQS_OFF
14330 testl %ebx,%ebx /* swapgs needed? */
14331 jnz paranoid_restore
14332 - testl $3,CS(%rsp)
14333 + testb $3,CS(%rsp)
14334 jnz paranoid_userspace
14335 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14336 + pax_exit_kernel
14337 + TRACE_IRQS_IRETQ 0
14338 + SWAPGS_UNSAFE_STACK
14339 + RESTORE_ALL 8
14340 + pax_force_retaddr_bts
14341 + jmp irq_return
14342 +#endif
14343 paranoid_swapgs:
14344 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14345 + pax_exit_kernel_user
14346 +#else
14347 + pax_exit_kernel
14348 +#endif
14349 TRACE_IRQS_IRETQ 0
14350 SWAPGS_UNSAFE_STACK
14351 RESTORE_ALL 8
14352 jmp irq_return
14353 paranoid_restore:
14354 + pax_exit_kernel
14355 TRACE_IRQS_IRETQ 0
14356 RESTORE_ALL 8
14357 + pax_force_retaddr_bts
14358 jmp irq_return
14359 paranoid_userspace:
14360 GET_THREAD_INFO(%rcx)
14361 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14362 TRACE_IRQS_OFF
14363 jmp paranoid_userspace
14364 CFI_ENDPROC
14365 -END(paranoid_exit)
14366 +ENDPROC(paranoid_exit)
14367
14368 /*
14369 * Exception entry point. This expects an error code/orig_rax on the stack.
14370 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14371 movq_cfi r14, R14+8
14372 movq_cfi r15, R15+8
14373 xorl %ebx,%ebx
14374 - testl $3,CS+8(%rsp)
14375 + testb $3,CS+8(%rsp)
14376 je error_kernelspace
14377 error_swapgs:
14378 SWAPGS
14379 error_sti:
14380 TRACE_IRQS_OFF
14381 + pax_force_retaddr_bts
14382 ret
14383
14384 /*
14385 @@ -1453,7 +1840,7 @@ bstep_iret:
14386 movq %rcx,RIP+8(%rsp)
14387 jmp error_swapgs
14388 CFI_ENDPROC
14389 -END(error_entry)
14390 +ENDPROC(error_entry)
14391
14392
14393 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14394 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14395 jnz retint_careful
14396 jmp retint_swapgs
14397 CFI_ENDPROC
14398 -END(error_exit)
14399 +ENDPROC(error_exit)
14400
14401
14402 /* runs on exception stack */
14403 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14404 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14405 call save_paranoid
14406 DEFAULT_FRAME 0
14407 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14408 + testb $3, CS(%rsp)
14409 + jnz 1f
14410 + pax_enter_kernel
14411 + jmp 2f
14412 +1: pax_enter_kernel_user
14413 +2:
14414 +#else
14415 + pax_enter_kernel
14416 +#endif
14417 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14418 movq %rsp,%rdi
14419 movq $-1,%rsi
14420 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14421 DISABLE_INTERRUPTS(CLBR_NONE)
14422 testl %ebx,%ebx /* swapgs needed? */
14423 jnz nmi_restore
14424 - testl $3,CS(%rsp)
14425 + testb $3,CS(%rsp)
14426 jnz nmi_userspace
14427 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14428 + pax_exit_kernel
14429 + SWAPGS_UNSAFE_STACK
14430 + RESTORE_ALL 8
14431 + pax_force_retaddr_bts
14432 + jmp irq_return
14433 +#endif
14434 nmi_swapgs:
14435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14436 + pax_exit_kernel_user
14437 +#else
14438 + pax_exit_kernel
14439 +#endif
14440 SWAPGS_UNSAFE_STACK
14441 + RESTORE_ALL 8
14442 + jmp irq_return
14443 nmi_restore:
14444 + pax_exit_kernel
14445 RESTORE_ALL 8
14446 + pax_force_retaddr_bts
14447 jmp irq_return
14448 nmi_userspace:
14449 GET_THREAD_INFO(%rcx)
14450 @@ -1529,14 +1942,14 @@ nmi_schedule:
14451 jmp paranoid_exit
14452 CFI_ENDPROC
14453 #endif
14454 -END(nmi)
14455 +ENDPROC(nmi)
14456
14457 ENTRY(ignore_sysret)
14458 CFI_STARTPROC
14459 mov $-ENOSYS,%eax
14460 sysret
14461 CFI_ENDPROC
14462 -END(ignore_sysret)
14463 +ENDPROC(ignore_sysret)
14464
14465 /*
14466 * End of kprobes section
14467 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14468 index c9a281f..ce2f317 100644
14469 --- a/arch/x86/kernel/ftrace.c
14470 +++ b/arch/x86/kernel/ftrace.c
14471 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14472 static const void *mod_code_newcode; /* holds the text to write to the IP */
14473
14474 static unsigned nmi_wait_count;
14475 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14476 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14477
14478 int ftrace_arch_read_dyn_info(char *buf, int size)
14479 {
14480 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14481
14482 r = snprintf(buf, size, "%u %u",
14483 nmi_wait_count,
14484 - atomic_read(&nmi_update_count));
14485 + atomic_read_unchecked(&nmi_update_count));
14486 return r;
14487 }
14488
14489 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14490
14491 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14492 smp_rmb();
14493 + pax_open_kernel();
14494 ftrace_mod_code();
14495 - atomic_inc(&nmi_update_count);
14496 + pax_close_kernel();
14497 + atomic_inc_unchecked(&nmi_update_count);
14498 }
14499 /* Must have previous changes seen before executions */
14500 smp_mb();
14501 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14502 {
14503 unsigned char replaced[MCOUNT_INSN_SIZE];
14504
14505 + ip = ktla_ktva(ip);
14506 +
14507 /*
14508 * Note: Due to modules and __init, code can
14509 * disappear and change, we need to protect against faulting
14510 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14511 unsigned char old[MCOUNT_INSN_SIZE], *new;
14512 int ret;
14513
14514 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14515 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14516 new = ftrace_call_replace(ip, (unsigned long)func);
14517 ret = ftrace_modify_code(ip, old, new);
14518
14519 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14520 {
14521 unsigned char code[MCOUNT_INSN_SIZE];
14522
14523 + ip = ktla_ktva(ip);
14524 +
14525 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14526 return -EFAULT;
14527
14528 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14529 index 3bb0850..55a56f4 100644
14530 --- a/arch/x86/kernel/head32.c
14531 +++ b/arch/x86/kernel/head32.c
14532 @@ -19,6 +19,7 @@
14533 #include <asm/io_apic.h>
14534 #include <asm/bios_ebda.h>
14535 #include <asm/tlbflush.h>
14536 +#include <asm/boot.h>
14537
14538 static void __init i386_default_early_setup(void)
14539 {
14540 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14541 {
14542 memblock_init();
14543
14544 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14545 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14546
14547 #ifdef CONFIG_BLK_DEV_INITRD
14548 /* Reserve INITRD */
14549 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14550 index ce0be7c..c41476e 100644
14551 --- a/arch/x86/kernel/head_32.S
14552 +++ b/arch/x86/kernel/head_32.S
14553 @@ -25,6 +25,12 @@
14554 /* Physical address */
14555 #define pa(X) ((X) - __PAGE_OFFSET)
14556
14557 +#ifdef CONFIG_PAX_KERNEXEC
14558 +#define ta(X) (X)
14559 +#else
14560 +#define ta(X) ((X) - __PAGE_OFFSET)
14561 +#endif
14562 +
14563 /*
14564 * References to members of the new_cpu_data structure.
14565 */
14566 @@ -54,11 +60,7 @@
14567 * and small than max_low_pfn, otherwise will waste some page table entries
14568 */
14569
14570 -#if PTRS_PER_PMD > 1
14571 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14572 -#else
14573 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14574 -#endif
14575 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14576
14577 /* Number of possible pages in the lowmem region */
14578 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14579 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14580 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14581
14582 /*
14583 + * Real beginning of normal "text" segment
14584 + */
14585 +ENTRY(stext)
14586 +ENTRY(_stext)
14587 +
14588 +/*
14589 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14590 * %esi points to the real-mode code as a 32-bit pointer.
14591 * CS and DS must be 4 GB flat segments, but we don't depend on
14592 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14593 * can.
14594 */
14595 __HEAD
14596 +
14597 +#ifdef CONFIG_PAX_KERNEXEC
14598 + jmp startup_32
14599 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14600 +.fill PAGE_SIZE-5,1,0xcc
14601 +#endif
14602 +
14603 ENTRY(startup_32)
14604 movl pa(stack_start),%ecx
14605
14606 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14607 2:
14608 leal -__PAGE_OFFSET(%ecx),%esp
14609
14610 +#ifdef CONFIG_SMP
14611 + movl $pa(cpu_gdt_table),%edi
14612 + movl $__per_cpu_load,%eax
14613 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14614 + rorl $16,%eax
14615 + movb %al,__KERNEL_PERCPU + 4(%edi)
14616 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14617 + movl $__per_cpu_end - 1,%eax
14618 + subl $__per_cpu_start,%eax
14619 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14620 +#endif
14621 +
14622 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14623 + movl $NR_CPUS,%ecx
14624 + movl $pa(cpu_gdt_table),%edi
14625 +1:
14626 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14627 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14628 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14629 + addl $PAGE_SIZE_asm,%edi
14630 + loop 1b
14631 +#endif
14632 +
14633 +#ifdef CONFIG_PAX_KERNEXEC
14634 + movl $pa(boot_gdt),%edi
14635 + movl $__LOAD_PHYSICAL_ADDR,%eax
14636 + movw %ax,__BOOT_CS + 2(%edi)
14637 + rorl $16,%eax
14638 + movb %al,__BOOT_CS + 4(%edi)
14639 + movb %ah,__BOOT_CS + 7(%edi)
14640 + rorl $16,%eax
14641 +
14642 + ljmp $(__BOOT_CS),$1f
14643 +1:
14644 +
14645 + movl $NR_CPUS,%ecx
14646 + movl $pa(cpu_gdt_table),%edi
14647 + addl $__PAGE_OFFSET,%eax
14648 +1:
14649 + movw %ax,__KERNEL_CS + 2(%edi)
14650 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14651 + rorl $16,%eax
14652 + movb %al,__KERNEL_CS + 4(%edi)
14653 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14654 + movb %ah,__KERNEL_CS + 7(%edi)
14655 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14656 + rorl $16,%eax
14657 + addl $PAGE_SIZE_asm,%edi
14658 + loop 1b
14659 +#endif
14660 +
14661 /*
14662 * Clear BSS first so that there are no surprises...
14663 */
14664 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14665 movl %eax, pa(max_pfn_mapped)
14666
14667 /* Do early initialization of the fixmap area */
14668 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14669 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14670 +#ifdef CONFIG_COMPAT_VDSO
14671 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14672 +#else
14673 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14674 +#endif
14675 #else /* Not PAE */
14676
14677 page_pde_offset = (__PAGE_OFFSET >> 20);
14678 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14679 movl %eax, pa(max_pfn_mapped)
14680
14681 /* Do early initialization of the fixmap area */
14682 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14683 - movl %eax,pa(initial_page_table+0xffc)
14684 +#ifdef CONFIG_COMPAT_VDSO
14685 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14686 +#else
14687 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14688 +#endif
14689 #endif
14690
14691 #ifdef CONFIG_PARAVIRT
14692 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14693 cmpl $num_subarch_entries, %eax
14694 jae bad_subarch
14695
14696 - movl pa(subarch_entries)(,%eax,4), %eax
14697 - subl $__PAGE_OFFSET, %eax
14698 - jmp *%eax
14699 + jmp *pa(subarch_entries)(,%eax,4)
14700
14701 bad_subarch:
14702 WEAK(lguest_entry)
14703 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14704 __INITDATA
14705
14706 subarch_entries:
14707 - .long default_entry /* normal x86/PC */
14708 - .long lguest_entry /* lguest hypervisor */
14709 - .long xen_entry /* Xen hypervisor */
14710 - .long default_entry /* Moorestown MID */
14711 + .long ta(default_entry) /* normal x86/PC */
14712 + .long ta(lguest_entry) /* lguest hypervisor */
14713 + .long ta(xen_entry) /* Xen hypervisor */
14714 + .long ta(default_entry) /* Moorestown MID */
14715 num_subarch_entries = (. - subarch_entries) / 4
14716 .previous
14717 #else
14718 @@ -312,6 +382,7 @@ default_entry:
14719 orl %edx,%eax
14720 movl %eax,%cr4
14721
14722 +#ifdef CONFIG_X86_PAE
14723 testb $X86_CR4_PAE, %al # check if PAE is enabled
14724 jz 6f
14725
14726 @@ -340,6 +411,9 @@ default_entry:
14727 /* Make changes effective */
14728 wrmsr
14729
14730 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14731 +#endif
14732 +
14733 6:
14734
14735 /*
14736 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14737 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14738 movl %eax,%ss # after changing gdt.
14739
14740 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14741 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14742 movl %eax,%ds
14743 movl %eax,%es
14744
14745 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14746 */
14747 cmpb $0,ready
14748 jne 1f
14749 - movl $gdt_page,%eax
14750 + movl $cpu_gdt_table,%eax
14751 movl $stack_canary,%ecx
14752 +#ifdef CONFIG_SMP
14753 + addl $__per_cpu_load,%ecx
14754 +#endif
14755 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14756 shrl $16, %ecx
14757 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14758 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14759 1:
14760 -#endif
14761 movl $(__KERNEL_STACK_CANARY),%eax
14762 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14763 + movl $(__USER_DS),%eax
14764 +#else
14765 + xorl %eax,%eax
14766 +#endif
14767 movl %eax,%gs
14768
14769 xorl %eax,%eax # Clear LDT
14770 @@ -558,22 +639,22 @@ early_page_fault:
14771 jmp early_fault
14772
14773 early_fault:
14774 - cld
14775 #ifdef CONFIG_PRINTK
14776 + cmpl $1,%ss:early_recursion_flag
14777 + je hlt_loop
14778 + incl %ss:early_recursion_flag
14779 + cld
14780 pusha
14781 movl $(__KERNEL_DS),%eax
14782 movl %eax,%ds
14783 movl %eax,%es
14784 - cmpl $2,early_recursion_flag
14785 - je hlt_loop
14786 - incl early_recursion_flag
14787 movl %cr2,%eax
14788 pushl %eax
14789 pushl %edx /* trapno */
14790 pushl $fault_msg
14791 call printk
14792 +; call dump_stack
14793 #endif
14794 - call dump_stack
14795 hlt_loop:
14796 hlt
14797 jmp hlt_loop
14798 @@ -581,8 +662,11 @@ hlt_loop:
14799 /* This is the default interrupt "handler" :-) */
14800 ALIGN
14801 ignore_int:
14802 - cld
14803 #ifdef CONFIG_PRINTK
14804 + cmpl $2,%ss:early_recursion_flag
14805 + je hlt_loop
14806 + incl %ss:early_recursion_flag
14807 + cld
14808 pushl %eax
14809 pushl %ecx
14810 pushl %edx
14811 @@ -591,9 +675,6 @@ ignore_int:
14812 movl $(__KERNEL_DS),%eax
14813 movl %eax,%ds
14814 movl %eax,%es
14815 - cmpl $2,early_recursion_flag
14816 - je hlt_loop
14817 - incl early_recursion_flag
14818 pushl 16(%esp)
14819 pushl 24(%esp)
14820 pushl 32(%esp)
14821 @@ -622,29 +703,43 @@ ENTRY(initial_code)
14822 /*
14823 * BSS section
14824 */
14825 -__PAGE_ALIGNED_BSS
14826 - .align PAGE_SIZE
14827 #ifdef CONFIG_X86_PAE
14828 +.section .initial_pg_pmd,"a",@progbits
14829 initial_pg_pmd:
14830 .fill 1024*KPMDS,4,0
14831 #else
14832 +.section .initial_page_table,"a",@progbits
14833 ENTRY(initial_page_table)
14834 .fill 1024,4,0
14835 #endif
14836 +.section .initial_pg_fixmap,"a",@progbits
14837 initial_pg_fixmap:
14838 .fill 1024,4,0
14839 +.section .empty_zero_page,"a",@progbits
14840 ENTRY(empty_zero_page)
14841 .fill 4096,1,0
14842 +.section .swapper_pg_dir,"a",@progbits
14843 ENTRY(swapper_pg_dir)
14844 +#ifdef CONFIG_X86_PAE
14845 + .fill 4,8,0
14846 +#else
14847 .fill 1024,4,0
14848 +#endif
14849 +
14850 +/*
14851 + * The IDT has to be page-aligned to simplify the Pentium
14852 + * F0 0F bug workaround.. We have a special link segment
14853 + * for this.
14854 + */
14855 +.section .idt,"a",@progbits
14856 +ENTRY(idt_table)
14857 + .fill 256,8,0
14858
14859 /*
14860 * This starts the data section.
14861 */
14862 #ifdef CONFIG_X86_PAE
14863 -__PAGE_ALIGNED_DATA
14864 - /* Page-aligned for the benefit of paravirt? */
14865 - .align PAGE_SIZE
14866 +.section .initial_page_table,"a",@progbits
14867 ENTRY(initial_page_table)
14868 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14869 # if KPMDS == 3
14870 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14871 # error "Kernel PMDs should be 1, 2 or 3"
14872 # endif
14873 .align PAGE_SIZE /* needs to be page-sized too */
14874 +
14875 +#ifdef CONFIG_PAX_PER_CPU_PGD
14876 +ENTRY(cpu_pgd)
14877 + .rept NR_CPUS
14878 + .fill 4,8,0
14879 + .endr
14880 +#endif
14881 +
14882 #endif
14883
14884 .data
14885 .balign 4
14886 ENTRY(stack_start)
14887 - .long init_thread_union+THREAD_SIZE
14888 + .long init_thread_union+THREAD_SIZE-8
14889
14890 +ready: .byte 0
14891 +
14892 +.section .rodata,"a",@progbits
14893 early_recursion_flag:
14894 .long 0
14895
14896 -ready: .byte 0
14897 -
14898 int_msg:
14899 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14900
14901 @@ -707,7 +811,7 @@ fault_msg:
14902 .word 0 # 32 bit align gdt_desc.address
14903 boot_gdt_descr:
14904 .word __BOOT_DS+7
14905 - .long boot_gdt - __PAGE_OFFSET
14906 + .long pa(boot_gdt)
14907
14908 .word 0 # 32-bit align idt_desc.address
14909 idt_descr:
14910 @@ -718,7 +822,7 @@ idt_descr:
14911 .word 0 # 32 bit align gdt_desc.address
14912 ENTRY(early_gdt_descr)
14913 .word GDT_ENTRIES*8-1
14914 - .long gdt_page /* Overwritten for secondary CPUs */
14915 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14916
14917 /*
14918 * The boot_gdt must mirror the equivalent in setup.S and is
14919 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14920 .align L1_CACHE_BYTES
14921 ENTRY(boot_gdt)
14922 .fill GDT_ENTRY_BOOT_CS,8,0
14923 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14924 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14925 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14926 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14927 +
14928 + .align PAGE_SIZE_asm
14929 +ENTRY(cpu_gdt_table)
14930 + .rept NR_CPUS
14931 + .quad 0x0000000000000000 /* NULL descriptor */
14932 + .quad 0x0000000000000000 /* 0x0b reserved */
14933 + .quad 0x0000000000000000 /* 0x13 reserved */
14934 + .quad 0x0000000000000000 /* 0x1b reserved */
14935 +
14936 +#ifdef CONFIG_PAX_KERNEXEC
14937 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14938 +#else
14939 + .quad 0x0000000000000000 /* 0x20 unused */
14940 +#endif
14941 +
14942 + .quad 0x0000000000000000 /* 0x28 unused */
14943 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14944 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14945 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14946 + .quad 0x0000000000000000 /* 0x4b reserved */
14947 + .quad 0x0000000000000000 /* 0x53 reserved */
14948 + .quad 0x0000000000000000 /* 0x5b reserved */
14949 +
14950 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14951 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14952 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14953 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14954 +
14955 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14956 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14957 +
14958 + /*
14959 + * Segments used for calling PnP BIOS have byte granularity.
14960 + * The code segments and data segments have fixed 64k limits,
14961 + * the transfer segment sizes are set at run time.
14962 + */
14963 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14964 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14965 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14966 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14967 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14968 +
14969 + /*
14970 + * The APM segments have byte granularity and their bases
14971 + * are set at run time. All have 64k limits.
14972 + */
14973 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14974 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14975 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14976 +
14977 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14978 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14979 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14980 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14981 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14982 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14983 +
14984 + /* Be sure this is zeroed to avoid false validations in Xen */
14985 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14986 + .endr
14987 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14988 index e11e394..9aebc5d 100644
14989 --- a/arch/x86/kernel/head_64.S
14990 +++ b/arch/x86/kernel/head_64.S
14991 @@ -19,6 +19,8 @@
14992 #include <asm/cache.h>
14993 #include <asm/processor-flags.h>
14994 #include <asm/percpu.h>
14995 +#include <asm/cpufeature.h>
14996 +#include <asm/alternative-asm.h>
14997
14998 #ifdef CONFIG_PARAVIRT
14999 #include <asm/asm-offsets.h>
15000 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15001 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15002 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15003 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15004 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15005 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15006 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15007 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15008 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15009 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15010
15011 .text
15012 __HEAD
15013 @@ -85,35 +93,23 @@ startup_64:
15014 */
15015 addq %rbp, init_level4_pgt + 0(%rip)
15016 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15017 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15018 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15019 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15020 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15021
15022 addq %rbp, level3_ident_pgt + 0(%rip)
15023 +#ifndef CONFIG_XEN
15024 + addq %rbp, level3_ident_pgt + 8(%rip)
15025 +#endif
15026
15027 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15028 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15029 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15030 +
15031 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15032 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15033
15034 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15035 -
15036 - /* Add an Identity mapping if I am above 1G */
15037 - leaq _text(%rip), %rdi
15038 - andq $PMD_PAGE_MASK, %rdi
15039 -
15040 - movq %rdi, %rax
15041 - shrq $PUD_SHIFT, %rax
15042 - andq $(PTRS_PER_PUD - 1), %rax
15043 - jz ident_complete
15044 -
15045 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15046 - leaq level3_ident_pgt(%rip), %rbx
15047 - movq %rdx, 0(%rbx, %rax, 8)
15048 -
15049 - movq %rdi, %rax
15050 - shrq $PMD_SHIFT, %rax
15051 - andq $(PTRS_PER_PMD - 1), %rax
15052 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15053 - leaq level2_spare_pgt(%rip), %rbx
15054 - movq %rdx, 0(%rbx, %rax, 8)
15055 -ident_complete:
15056 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15057
15058 /*
15059 * Fixup the kernel text+data virtual addresses. Note that
15060 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15061 * after the boot processor executes this code.
15062 */
15063
15064 - /* Enable PAE mode and PGE */
15065 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15066 + /* Enable PAE mode and PSE/PGE */
15067 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15068 movq %rax, %cr4
15069
15070 /* Setup early boot stage 4 level pagetables. */
15071 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15072 movl $MSR_EFER, %ecx
15073 rdmsr
15074 btsl $_EFER_SCE, %eax /* Enable System Call */
15075 - btl $20,%edi /* No Execute supported? */
15076 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15077 jnc 1f
15078 btsl $_EFER_NX, %eax
15079 + leaq init_level4_pgt(%rip), %rdi
15080 +#ifndef CONFIG_EFI
15081 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15082 +#endif
15083 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15084 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15085 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15086 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15087 1: wrmsr /* Make changes effective */
15088
15089 /* Setup cr0 */
15090 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15091 * jump. In addition we need to ensure %cs is set so we make this
15092 * a far return.
15093 */
15094 + pax_set_fptr_mask
15095 movq initial_code(%rip),%rax
15096 pushq $0 # fake return address to stop unwinder
15097 pushq $__KERNEL_CS # set correct cs
15098 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15099 bad_address:
15100 jmp bad_address
15101
15102 - .section ".init.text","ax"
15103 + __INIT
15104 #ifdef CONFIG_EARLY_PRINTK
15105 .globl early_idt_handlers
15106 early_idt_handlers:
15107 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15108 #endif /* EARLY_PRINTK */
15109 1: hlt
15110 jmp 1b
15111 + .previous
15112
15113 #ifdef CONFIG_EARLY_PRINTK
15114 + __INITDATA
15115 early_recursion_flag:
15116 .long 0
15117 + .previous
15118
15119 + .section .rodata,"a",@progbits
15120 early_idt_msg:
15121 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15122 early_idt_ripmsg:
15123 .asciz "RIP %s\n"
15124 + .previous
15125 #endif /* CONFIG_EARLY_PRINTK */
15126 - .previous
15127
15128 + .section .rodata,"a",@progbits
15129 #define NEXT_PAGE(name) \
15130 .balign PAGE_SIZE; \
15131 ENTRY(name)
15132 @@ -338,7 +348,6 @@ ENTRY(name)
15133 i = i + 1 ; \
15134 .endr
15135
15136 - .data
15137 /*
15138 * This default setting generates an ident mapping at address 0x100000
15139 * and a mapping for the kernel that precisely maps virtual address
15140 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15141 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15142 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15143 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15144 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15145 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15146 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15147 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15148 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15149 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15150 .org init_level4_pgt + L4_START_KERNEL*8, 0
15151 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15152 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15153
15154 +#ifdef CONFIG_PAX_PER_CPU_PGD
15155 +NEXT_PAGE(cpu_pgd)
15156 + .rept NR_CPUS
15157 + .fill 512,8,0
15158 + .endr
15159 +#endif
15160 +
15161 NEXT_PAGE(level3_ident_pgt)
15162 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15163 +#ifdef CONFIG_XEN
15164 .fill 511,8,0
15165 +#else
15166 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15167 + .fill 510,8,0
15168 +#endif
15169 +
15170 +NEXT_PAGE(level3_vmalloc_start_pgt)
15171 + .fill 512,8,0
15172 +
15173 +NEXT_PAGE(level3_vmalloc_end_pgt)
15174 + .fill 512,8,0
15175 +
15176 +NEXT_PAGE(level3_vmemmap_pgt)
15177 + .fill L3_VMEMMAP_START,8,0
15178 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15179
15180 NEXT_PAGE(level3_kernel_pgt)
15181 .fill L3_START_KERNEL,8,0
15182 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15183 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15184 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15185
15186 +NEXT_PAGE(level2_vmemmap_pgt)
15187 + .fill 512,8,0
15188 +
15189 NEXT_PAGE(level2_fixmap_pgt)
15190 - .fill 506,8,0
15191 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15192 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15193 - .fill 5,8,0
15194 + .fill 507,8,0
15195 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15196 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15197 + .fill 4,8,0
15198
15199 -NEXT_PAGE(level1_fixmap_pgt)
15200 +NEXT_PAGE(level1_vsyscall_pgt)
15201 .fill 512,8,0
15202
15203 -NEXT_PAGE(level2_ident_pgt)
15204 - /* Since I easily can, map the first 1G.
15205 + /* Since I easily can, map the first 2G.
15206 * Don't set NX because code runs from these pages.
15207 */
15208 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15209 +NEXT_PAGE(level2_ident_pgt)
15210 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15211
15212 NEXT_PAGE(level2_kernel_pgt)
15213 /*
15214 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15215 * If you want to increase this then increase MODULES_VADDR
15216 * too.)
15217 */
15218 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15219 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15220 -
15221 -NEXT_PAGE(level2_spare_pgt)
15222 - .fill 512, 8, 0
15223 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15224
15225 #undef PMDS
15226 #undef NEXT_PAGE
15227
15228 - .data
15229 + .align PAGE_SIZE
15230 +ENTRY(cpu_gdt_table)
15231 + .rept NR_CPUS
15232 + .quad 0x0000000000000000 /* NULL descriptor */
15233 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15234 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15235 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15236 + .quad 0x00cffb000000ffff /* __USER32_CS */
15237 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15238 + .quad 0x00affb000000ffff /* __USER_CS */
15239 +
15240 +#ifdef CONFIG_PAX_KERNEXEC
15241 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15242 +#else
15243 + .quad 0x0 /* unused */
15244 +#endif
15245 +
15246 + .quad 0,0 /* TSS */
15247 + .quad 0,0 /* LDT */
15248 + .quad 0,0,0 /* three TLS descriptors */
15249 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15250 + /* asm/segment.h:GDT_ENTRIES must match this */
15251 +
15252 + /* zero the remaining page */
15253 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15254 + .endr
15255 +
15256 .align 16
15257 .globl early_gdt_descr
15258 early_gdt_descr:
15259 .word GDT_ENTRIES*8-1
15260 early_gdt_descr_base:
15261 - .quad INIT_PER_CPU_VAR(gdt_page)
15262 + .quad cpu_gdt_table
15263
15264 ENTRY(phys_base)
15265 /* This must match the first entry in level2_kernel_pgt */
15266 .quad 0x0000000000000000
15267
15268 #include "../../x86/xen/xen-head.S"
15269 -
15270 - .section .bss, "aw", @nobits
15271 +
15272 + .section .rodata,"a",@progbits
15273 .align L1_CACHE_BYTES
15274 ENTRY(idt_table)
15275 - .skip IDT_ENTRIES * 16
15276 + .fill 512,8,0
15277
15278 __PAGE_ALIGNED_BSS
15279 .align PAGE_SIZE
15280 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15281 index 9c3bd4a..e1d9b35 100644
15282 --- a/arch/x86/kernel/i386_ksyms_32.c
15283 +++ b/arch/x86/kernel/i386_ksyms_32.c
15284 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15285 EXPORT_SYMBOL(cmpxchg8b_emu);
15286 #endif
15287
15288 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15289 +
15290 /* Networking helper routines. */
15291 EXPORT_SYMBOL(csum_partial_copy_generic);
15292 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15293 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15294
15295 EXPORT_SYMBOL(__get_user_1);
15296 EXPORT_SYMBOL(__get_user_2);
15297 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15298
15299 EXPORT_SYMBOL(csum_partial);
15300 EXPORT_SYMBOL(empty_zero_page);
15301 +
15302 +#ifdef CONFIG_PAX_KERNEXEC
15303 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15304 +#endif
15305 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15306 index 6104852..6114160 100644
15307 --- a/arch/x86/kernel/i8259.c
15308 +++ b/arch/x86/kernel/i8259.c
15309 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15310 "spurious 8259A interrupt: IRQ%d.\n", irq);
15311 spurious_irq_mask |= irqmask;
15312 }
15313 - atomic_inc(&irq_err_count);
15314 + atomic_inc_unchecked(&irq_err_count);
15315 /*
15316 * Theoretically we do not have to handle this IRQ,
15317 * but in Linux this does not cause problems and is
15318 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15319 index 43e9ccf..44ccf6f 100644
15320 --- a/arch/x86/kernel/init_task.c
15321 +++ b/arch/x86/kernel/init_task.c
15322 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15323 * way process stacks are handled. This is done by having a special
15324 * "init_task" linker map entry..
15325 */
15326 -union thread_union init_thread_union __init_task_data =
15327 - { INIT_THREAD_INFO(init_task) };
15328 +union thread_union init_thread_union __init_task_data;
15329
15330 /*
15331 * Initial task structure.
15332 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15333 * section. Since TSS's are completely CPU-local, we want them
15334 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15335 */
15336 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15337 -
15338 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15339 +EXPORT_SYMBOL(init_tss);
15340 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15341 index 8c96897..be66bfa 100644
15342 --- a/arch/x86/kernel/ioport.c
15343 +++ b/arch/x86/kernel/ioport.c
15344 @@ -6,6 +6,7 @@
15345 #include <linux/sched.h>
15346 #include <linux/kernel.h>
15347 #include <linux/capability.h>
15348 +#include <linux/security.h>
15349 #include <linux/errno.h>
15350 #include <linux/types.h>
15351 #include <linux/ioport.h>
15352 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15353
15354 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15355 return -EINVAL;
15356 +#ifdef CONFIG_GRKERNSEC_IO
15357 + if (turn_on && grsec_disable_privio) {
15358 + gr_handle_ioperm();
15359 + return -EPERM;
15360 + }
15361 +#endif
15362 if (turn_on && !capable(CAP_SYS_RAWIO))
15363 return -EPERM;
15364
15365 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15366 * because the ->io_bitmap_max value must match the bitmap
15367 * contents:
15368 */
15369 - tss = &per_cpu(init_tss, get_cpu());
15370 + tss = init_tss + get_cpu();
15371
15372 if (turn_on)
15373 bitmap_clear(t->io_bitmap_ptr, from, num);
15374 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15375 return -EINVAL;
15376 /* Trying to gain more privileges? */
15377 if (level > old) {
15378 +#ifdef CONFIG_GRKERNSEC_IO
15379 + if (grsec_disable_privio) {
15380 + gr_handle_iopl();
15381 + return -EPERM;
15382 + }
15383 +#endif
15384 if (!capable(CAP_SYS_RAWIO))
15385 return -EPERM;
15386 }
15387 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15388 index 429e0c9..17b3ece 100644
15389 --- a/arch/x86/kernel/irq.c
15390 +++ b/arch/x86/kernel/irq.c
15391 @@ -18,7 +18,7 @@
15392 #include <asm/mce.h>
15393 #include <asm/hw_irq.h>
15394
15395 -atomic_t irq_err_count;
15396 +atomic_unchecked_t irq_err_count;
15397
15398 /* Function pointer for generic interrupt vector handling */
15399 void (*x86_platform_ipi_callback)(void) = NULL;
15400 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15401 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15402 seq_printf(p, " Machine check polls\n");
15403 #endif
15404 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15405 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15406 #if defined(CONFIG_X86_IO_APIC)
15407 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15408 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15409 #endif
15410 return 0;
15411 }
15412 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15413
15414 u64 arch_irq_stat(void)
15415 {
15416 - u64 sum = atomic_read(&irq_err_count);
15417 + u64 sum = atomic_read_unchecked(&irq_err_count);
15418
15419 #ifdef CONFIG_X86_IO_APIC
15420 - sum += atomic_read(&irq_mis_count);
15421 + sum += atomic_read_unchecked(&irq_mis_count);
15422 #endif
15423 return sum;
15424 }
15425 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15426 index 7209070..cbcd71a 100644
15427 --- a/arch/x86/kernel/irq_32.c
15428 +++ b/arch/x86/kernel/irq_32.c
15429 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15430 __asm__ __volatile__("andl %%esp,%0" :
15431 "=r" (sp) : "0" (THREAD_SIZE - 1));
15432
15433 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15434 + return sp < STACK_WARN;
15435 }
15436
15437 static void print_stack_overflow(void)
15438 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15439 * per-CPU IRQ handling contexts (thread information and stack)
15440 */
15441 union irq_ctx {
15442 - struct thread_info tinfo;
15443 - u32 stack[THREAD_SIZE/sizeof(u32)];
15444 + unsigned long previous_esp;
15445 + u32 stack[THREAD_SIZE/sizeof(u32)];
15446 } __attribute__((aligned(THREAD_SIZE)));
15447
15448 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15449 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15450 static inline int
15451 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15452 {
15453 - union irq_ctx *curctx, *irqctx;
15454 + union irq_ctx *irqctx;
15455 u32 *isp, arg1, arg2;
15456
15457 - curctx = (union irq_ctx *) current_thread_info();
15458 irqctx = __this_cpu_read(hardirq_ctx);
15459
15460 /*
15461 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15462 * handler) we can't do that and just have to keep using the
15463 * current stack (which is the irq stack already after all)
15464 */
15465 - if (unlikely(curctx == irqctx))
15466 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15467 return 0;
15468
15469 /* build the stack frame on the IRQ stack */
15470 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15471 - irqctx->tinfo.task = curctx->tinfo.task;
15472 - irqctx->tinfo.previous_esp = current_stack_pointer;
15473 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15474 + irqctx->previous_esp = current_stack_pointer;
15475
15476 - /*
15477 - * Copy the softirq bits in preempt_count so that the
15478 - * softirq checks work in the hardirq context.
15479 - */
15480 - irqctx->tinfo.preempt_count =
15481 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15482 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15484 + __set_fs(MAKE_MM_SEG(0));
15485 +#endif
15486
15487 if (unlikely(overflow))
15488 call_on_stack(print_stack_overflow, isp);
15489 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15490 : "0" (irq), "1" (desc), "2" (isp),
15491 "D" (desc->handle_irq)
15492 : "memory", "cc", "ecx");
15493 +
15494 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15495 + __set_fs(current_thread_info()->addr_limit);
15496 +#endif
15497 +
15498 return 1;
15499 }
15500
15501 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15502 */
15503 void __cpuinit irq_ctx_init(int cpu)
15504 {
15505 - union irq_ctx *irqctx;
15506 -
15507 if (per_cpu(hardirq_ctx, cpu))
15508 return;
15509
15510 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15511 - THREAD_FLAGS,
15512 - THREAD_ORDER));
15513 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15514 - irqctx->tinfo.cpu = cpu;
15515 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15516 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15517 -
15518 - per_cpu(hardirq_ctx, cpu) = irqctx;
15519 -
15520 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15521 - THREAD_FLAGS,
15522 - THREAD_ORDER));
15523 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15524 - irqctx->tinfo.cpu = cpu;
15525 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15526 -
15527 - per_cpu(softirq_ctx, cpu) = irqctx;
15528 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15529 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15530
15531 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15532 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15533 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15534 asmlinkage void do_softirq(void)
15535 {
15536 unsigned long flags;
15537 - struct thread_info *curctx;
15538 union irq_ctx *irqctx;
15539 u32 *isp;
15540
15541 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15542 local_irq_save(flags);
15543
15544 if (local_softirq_pending()) {
15545 - curctx = current_thread_info();
15546 irqctx = __this_cpu_read(softirq_ctx);
15547 - irqctx->tinfo.task = curctx->task;
15548 - irqctx->tinfo.previous_esp = current_stack_pointer;
15549 + irqctx->previous_esp = current_stack_pointer;
15550
15551 /* build the stack frame on the softirq stack */
15552 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15553 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15554 +
15555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15556 + __set_fs(MAKE_MM_SEG(0));
15557 +#endif
15558
15559 call_on_stack(__do_softirq, isp);
15560 +
15561 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15562 + __set_fs(current_thread_info()->addr_limit);
15563 +#endif
15564 +
15565 /*
15566 * Shouldn't happen, we returned above if in_interrupt():
15567 */
15568 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15569 index 69bca46..0bac999 100644
15570 --- a/arch/x86/kernel/irq_64.c
15571 +++ b/arch/x86/kernel/irq_64.c
15572 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15573 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15574 u64 curbase = (u64)task_stack_page(current);
15575
15576 - if (user_mode_vm(regs))
15577 + if (user_mode(regs))
15578 return;
15579
15580 WARN_ONCE(regs->sp >= curbase &&
15581 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15582 index faba577..93b9e71 100644
15583 --- a/arch/x86/kernel/kgdb.c
15584 +++ b/arch/x86/kernel/kgdb.c
15585 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15586 #ifdef CONFIG_X86_32
15587 switch (regno) {
15588 case GDB_SS:
15589 - if (!user_mode_vm(regs))
15590 + if (!user_mode(regs))
15591 *(unsigned long *)mem = __KERNEL_DS;
15592 break;
15593 case GDB_SP:
15594 - if (!user_mode_vm(regs))
15595 + if (!user_mode(regs))
15596 *(unsigned long *)mem = kernel_stack_pointer(regs);
15597 break;
15598 case GDB_GS:
15599 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15600 case 'k':
15601 /* clear the trace bit */
15602 linux_regs->flags &= ~X86_EFLAGS_TF;
15603 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15604 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15605
15606 /* set the trace bit if we're stepping */
15607 if (remcomInBuffer[0] == 's') {
15608 linux_regs->flags |= X86_EFLAGS_TF;
15609 - atomic_set(&kgdb_cpu_doing_single_step,
15610 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15611 raw_smp_processor_id());
15612 }
15613
15614 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15615
15616 switch (cmd) {
15617 case DIE_DEBUG:
15618 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15619 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15620 if (user_mode(regs))
15621 return single_step_cont(regs, args);
15622 break;
15623 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15624 index 7da647d..5d3c4c1 100644
15625 --- a/arch/x86/kernel/kprobes.c
15626 +++ b/arch/x86/kernel/kprobes.c
15627 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15628 } __attribute__((packed)) *insn;
15629
15630 insn = (struct __arch_relative_insn *)from;
15631 +
15632 + pax_open_kernel();
15633 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15634 insn->op = op;
15635 + pax_close_kernel();
15636 }
15637
15638 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15639 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15640 kprobe_opcode_t opcode;
15641 kprobe_opcode_t *orig_opcodes = opcodes;
15642
15643 - if (search_exception_tables((unsigned long)opcodes))
15644 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15645 return 0; /* Page fault may occur on this address. */
15646
15647 retry:
15648 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15649 }
15650 }
15651 insn_get_length(&insn);
15652 + pax_open_kernel();
15653 memcpy(dest, insn.kaddr, insn.length);
15654 + pax_close_kernel();
15655
15656 #ifdef CONFIG_X86_64
15657 if (insn_rip_relative(&insn)) {
15658 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15659 (u8 *) dest;
15660 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15661 disp = (u8 *) dest + insn_offset_displacement(&insn);
15662 + pax_open_kernel();
15663 *(s32 *) disp = (s32) newdisp;
15664 + pax_close_kernel();
15665 }
15666 #endif
15667 return insn.length;
15668 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15669 */
15670 __copy_instruction(p->ainsn.insn, p->addr, 0);
15671
15672 - if (can_boost(p->addr))
15673 + if (can_boost(ktla_ktva(p->addr)))
15674 p->ainsn.boostable = 0;
15675 else
15676 p->ainsn.boostable = -1;
15677
15678 - p->opcode = *p->addr;
15679 + p->opcode = *(ktla_ktva(p->addr));
15680 }
15681
15682 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15683 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15684 * nor set current_kprobe, because it doesn't use single
15685 * stepping.
15686 */
15687 - regs->ip = (unsigned long)p->ainsn.insn;
15688 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15689 preempt_enable_no_resched();
15690 return;
15691 }
15692 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15693 if (p->opcode == BREAKPOINT_INSTRUCTION)
15694 regs->ip = (unsigned long)p->addr;
15695 else
15696 - regs->ip = (unsigned long)p->ainsn.insn;
15697 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15698 }
15699
15700 /*
15701 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15702 setup_singlestep(p, regs, kcb, 0);
15703 return 1;
15704 }
15705 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15706 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15707 /*
15708 * The breakpoint instruction was removed right
15709 * after we hit it. Another cpu has removed
15710 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15711 " movq %rax, 152(%rsp)\n"
15712 RESTORE_REGS_STRING
15713 " popfq\n"
15714 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15715 + " btsq $63,(%rsp)\n"
15716 +#endif
15717 #else
15718 " pushf\n"
15719 SAVE_REGS_STRING
15720 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15721 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15722 {
15723 unsigned long *tos = stack_addr(regs);
15724 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15725 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15726 unsigned long orig_ip = (unsigned long)p->addr;
15727 kprobe_opcode_t *insn = p->ainsn.insn;
15728
15729 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15730 struct die_args *args = data;
15731 int ret = NOTIFY_DONE;
15732
15733 - if (args->regs && user_mode_vm(args->regs))
15734 + if (args->regs && user_mode(args->regs))
15735 return ret;
15736
15737 switch (val) {
15738 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15739 * Verify if the address gap is in 2GB range, because this uses
15740 * a relative jump.
15741 */
15742 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15743 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15744 if (abs(rel) > 0x7fffffff)
15745 return -ERANGE;
15746
15747 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15748 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15749
15750 /* Set probe function call */
15751 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15752 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15753
15754 /* Set returning jmp instruction at the tail of out-of-line buffer */
15755 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15756 - (u8 *)op->kp.addr + op->optinsn.size);
15757 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15758
15759 flush_icache_range((unsigned long) buf,
15760 (unsigned long) buf + TMPL_END_IDX +
15761 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15762 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15763
15764 /* Backup instructions which will be replaced by jump address */
15765 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15766 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15767 RELATIVE_ADDR_SIZE);
15768
15769 insn_buf[0] = RELATIVEJUMP_OPCODE;
15770 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15771 index a9c2116..a52d4fc 100644
15772 --- a/arch/x86/kernel/kvm.c
15773 +++ b/arch/x86/kernel/kvm.c
15774 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15775 pv_mmu_ops.set_pud = kvm_set_pud;
15776 #if PAGETABLE_LEVELS == 4
15777 pv_mmu_ops.set_pgd = kvm_set_pgd;
15778 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15779 #endif
15780 #endif
15781 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15782 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15783 index ea69726..604d066 100644
15784 --- a/arch/x86/kernel/ldt.c
15785 +++ b/arch/x86/kernel/ldt.c
15786 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15787 if (reload) {
15788 #ifdef CONFIG_SMP
15789 preempt_disable();
15790 - load_LDT(pc);
15791 + load_LDT_nolock(pc);
15792 if (!cpumask_equal(mm_cpumask(current->mm),
15793 cpumask_of(smp_processor_id())))
15794 smp_call_function(flush_ldt, current->mm, 1);
15795 preempt_enable();
15796 #else
15797 - load_LDT(pc);
15798 + load_LDT_nolock(pc);
15799 #endif
15800 }
15801 if (oldsize) {
15802 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15803 return err;
15804
15805 for (i = 0; i < old->size; i++)
15806 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15807 + write_ldt_entry(new->ldt, i, old->ldt + i);
15808 return 0;
15809 }
15810
15811 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15812 retval = copy_ldt(&mm->context, &old_mm->context);
15813 mutex_unlock(&old_mm->context.lock);
15814 }
15815 +
15816 + if (tsk == current) {
15817 + mm->context.vdso = 0;
15818 +
15819 +#ifdef CONFIG_X86_32
15820 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15821 + mm->context.user_cs_base = 0UL;
15822 + mm->context.user_cs_limit = ~0UL;
15823 +
15824 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15825 + cpus_clear(mm->context.cpu_user_cs_mask);
15826 +#endif
15827 +
15828 +#endif
15829 +#endif
15830 +
15831 + }
15832 +
15833 return retval;
15834 }
15835
15836 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15837 }
15838 }
15839
15840 +#ifdef CONFIG_PAX_SEGMEXEC
15841 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15842 + error = -EINVAL;
15843 + goto out_unlock;
15844 + }
15845 +#endif
15846 +
15847 fill_ldt(&ldt, &ldt_info);
15848 if (oldmode)
15849 ldt.avl = 0;
15850 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15851 index a3fa43b..8966f4c 100644
15852 --- a/arch/x86/kernel/machine_kexec_32.c
15853 +++ b/arch/x86/kernel/machine_kexec_32.c
15854 @@ -27,7 +27,7 @@
15855 #include <asm/cacheflush.h>
15856 #include <asm/debugreg.h>
15857
15858 -static void set_idt(void *newidt, __u16 limit)
15859 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15860 {
15861 struct desc_ptr curidt;
15862
15863 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15864 }
15865
15866
15867 -static void set_gdt(void *newgdt, __u16 limit)
15868 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15869 {
15870 struct desc_ptr curgdt;
15871
15872 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15873 }
15874
15875 control_page = page_address(image->control_code_page);
15876 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15877 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15878
15879 relocate_kernel_ptr = control_page;
15880 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15881 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15882 index 3ca42d0..7cff8cc 100644
15883 --- a/arch/x86/kernel/microcode_intel.c
15884 +++ b/arch/x86/kernel/microcode_intel.c
15885 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15886
15887 static int get_ucode_user(void *to, const void *from, size_t n)
15888 {
15889 - return copy_from_user(to, from, n);
15890 + return copy_from_user(to, (const void __force_user *)from, n);
15891 }
15892
15893 static enum ucode_state
15894 request_microcode_user(int cpu, const void __user *buf, size_t size)
15895 {
15896 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15897 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15898 }
15899
15900 static void microcode_fini_cpu(int cpu)
15901 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15902 index 925179f..267ac7a 100644
15903 --- a/arch/x86/kernel/module.c
15904 +++ b/arch/x86/kernel/module.c
15905 @@ -36,15 +36,60 @@
15906 #define DEBUGP(fmt...)
15907 #endif
15908
15909 -void *module_alloc(unsigned long size)
15910 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15911 {
15912 - if (PAGE_ALIGN(size) > MODULES_LEN)
15913 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15914 return NULL;
15915 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15916 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15917 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15918 -1, __builtin_return_address(0));
15919 }
15920
15921 +void *module_alloc(unsigned long size)
15922 +{
15923 +
15924 +#ifdef CONFIG_PAX_KERNEXEC
15925 + return __module_alloc(size, PAGE_KERNEL);
15926 +#else
15927 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15928 +#endif
15929 +
15930 +}
15931 +
15932 +#ifdef CONFIG_PAX_KERNEXEC
15933 +#ifdef CONFIG_X86_32
15934 +void *module_alloc_exec(unsigned long size)
15935 +{
15936 + struct vm_struct *area;
15937 +
15938 + if (size == 0)
15939 + return NULL;
15940 +
15941 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15942 + return area ? area->addr : NULL;
15943 +}
15944 +EXPORT_SYMBOL(module_alloc_exec);
15945 +
15946 +void module_free_exec(struct module *mod, void *module_region)
15947 +{
15948 + vunmap(module_region);
15949 +}
15950 +EXPORT_SYMBOL(module_free_exec);
15951 +#else
15952 +void module_free_exec(struct module *mod, void *module_region)
15953 +{
15954 + module_free(mod, module_region);
15955 +}
15956 +EXPORT_SYMBOL(module_free_exec);
15957 +
15958 +void *module_alloc_exec(unsigned long size)
15959 +{
15960 + return __module_alloc(size, PAGE_KERNEL_RX);
15961 +}
15962 +EXPORT_SYMBOL(module_alloc_exec);
15963 +#endif
15964 +#endif
15965 +
15966 #ifdef CONFIG_X86_32
15967 int apply_relocate(Elf32_Shdr *sechdrs,
15968 const char *strtab,
15969 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15970 unsigned int i;
15971 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15972 Elf32_Sym *sym;
15973 - uint32_t *location;
15974 + uint32_t *plocation, location;
15975
15976 DEBUGP("Applying relocate section %u to %u\n", relsec,
15977 sechdrs[relsec].sh_info);
15978 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15979 /* This is where to make the change */
15980 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15981 - + rel[i].r_offset;
15982 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15983 + location = (uint32_t)plocation;
15984 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15985 + plocation = ktla_ktva((void *)plocation);
15986 /* This is the symbol it is referring to. Note that all
15987 undefined symbols have been resolved. */
15988 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15989 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15990 switch (ELF32_R_TYPE(rel[i].r_info)) {
15991 case R_386_32:
15992 /* We add the value into the location given */
15993 - *location += sym->st_value;
15994 + pax_open_kernel();
15995 + *plocation += sym->st_value;
15996 + pax_close_kernel();
15997 break;
15998 case R_386_PC32:
15999 /* Add the value, subtract its postition */
16000 - *location += sym->st_value - (uint32_t)location;
16001 + pax_open_kernel();
16002 + *plocation += sym->st_value - location;
16003 + pax_close_kernel();
16004 break;
16005 default:
16006 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16007 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16008 case R_X86_64_NONE:
16009 break;
16010 case R_X86_64_64:
16011 + pax_open_kernel();
16012 *(u64 *)loc = val;
16013 + pax_close_kernel();
16014 break;
16015 case R_X86_64_32:
16016 + pax_open_kernel();
16017 *(u32 *)loc = val;
16018 + pax_close_kernel();
16019 if (val != *(u32 *)loc)
16020 goto overflow;
16021 break;
16022 case R_X86_64_32S:
16023 + pax_open_kernel();
16024 *(s32 *)loc = val;
16025 + pax_close_kernel();
16026 if ((s64)val != *(s32 *)loc)
16027 goto overflow;
16028 break;
16029 case R_X86_64_PC32:
16030 val -= (u64)loc;
16031 + pax_open_kernel();
16032 *(u32 *)loc = val;
16033 + pax_close_kernel();
16034 +
16035 #if 0
16036 if ((s64)val != *(s32 *)loc)
16037 goto overflow;
16038 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16039 index e88f37b..1353db6 100644
16040 --- a/arch/x86/kernel/nmi.c
16041 +++ b/arch/x86/kernel/nmi.c
16042 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16043 dotraplinkage notrace __kprobes void
16044 do_nmi(struct pt_regs *regs, long error_code)
16045 {
16046 +
16047 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16048 + if (!user_mode(regs)) {
16049 + unsigned long cs = regs->cs & 0xFFFF;
16050 + unsigned long ip = ktva_ktla(regs->ip);
16051 +
16052 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16053 + regs->ip = ip;
16054 + }
16055 +#endif
16056 +
16057 nmi_enter();
16058
16059 inc_irq_stat(__nmi_count);
16060 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16061 index 676b8c7..870ba04 100644
16062 --- a/arch/x86/kernel/paravirt-spinlocks.c
16063 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16064 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16065 arch_spin_lock(lock);
16066 }
16067
16068 -struct pv_lock_ops pv_lock_ops = {
16069 +struct pv_lock_ops pv_lock_ops __read_only = {
16070 #ifdef CONFIG_SMP
16071 .spin_is_locked = __ticket_spin_is_locked,
16072 .spin_is_contended = __ticket_spin_is_contended,
16073 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16074 index d90272e..6bb013b 100644
16075 --- a/arch/x86/kernel/paravirt.c
16076 +++ b/arch/x86/kernel/paravirt.c
16077 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16078 {
16079 return x;
16080 }
16081 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16082 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16083 +#endif
16084
16085 void __init default_banner(void)
16086 {
16087 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16088 if (opfunc == NULL)
16089 /* If there's no function, patch it with a ud2a (BUG) */
16090 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16091 - else if (opfunc == _paravirt_nop)
16092 + else if (opfunc == (void *)_paravirt_nop)
16093 /* If the operation is a nop, then nop the callsite */
16094 ret = paravirt_patch_nop();
16095
16096 /* identity functions just return their single argument */
16097 - else if (opfunc == _paravirt_ident_32)
16098 + else if (opfunc == (void *)_paravirt_ident_32)
16099 ret = paravirt_patch_ident_32(insnbuf, len);
16100 - else if (opfunc == _paravirt_ident_64)
16101 + else if (opfunc == (void *)_paravirt_ident_64)
16102 ret = paravirt_patch_ident_64(insnbuf, len);
16103 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16104 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16105 + ret = paravirt_patch_ident_64(insnbuf, len);
16106 +#endif
16107
16108 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16109 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16110 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16111 if (insn_len > len || start == NULL)
16112 insn_len = len;
16113 else
16114 - memcpy(insnbuf, start, insn_len);
16115 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16116
16117 return insn_len;
16118 }
16119 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16120 preempt_enable();
16121 }
16122
16123 -struct pv_info pv_info = {
16124 +struct pv_info pv_info __read_only = {
16125 .name = "bare hardware",
16126 .paravirt_enabled = 0,
16127 .kernel_rpl = 0,
16128 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16129 #endif
16130 };
16131
16132 -struct pv_init_ops pv_init_ops = {
16133 +struct pv_init_ops pv_init_ops __read_only = {
16134 .patch = native_patch,
16135 };
16136
16137 -struct pv_time_ops pv_time_ops = {
16138 +struct pv_time_ops pv_time_ops __read_only = {
16139 .sched_clock = native_sched_clock,
16140 .steal_clock = native_steal_clock,
16141 };
16142
16143 -struct pv_irq_ops pv_irq_ops = {
16144 +struct pv_irq_ops pv_irq_ops __read_only = {
16145 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16146 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16147 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16148 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16149 #endif
16150 };
16151
16152 -struct pv_cpu_ops pv_cpu_ops = {
16153 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16154 .cpuid = native_cpuid,
16155 .get_debugreg = native_get_debugreg,
16156 .set_debugreg = native_set_debugreg,
16157 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16158 .end_context_switch = paravirt_nop,
16159 };
16160
16161 -struct pv_apic_ops pv_apic_ops = {
16162 +struct pv_apic_ops pv_apic_ops __read_only = {
16163 #ifdef CONFIG_X86_LOCAL_APIC
16164 .startup_ipi_hook = paravirt_nop,
16165 #endif
16166 };
16167
16168 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16169 +#ifdef CONFIG_X86_32
16170 +#ifdef CONFIG_X86_PAE
16171 +/* 64-bit pagetable entries */
16172 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16173 +#else
16174 /* 32-bit pagetable entries */
16175 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16176 +#endif
16177 #else
16178 /* 64-bit pagetable entries */
16179 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16180 #endif
16181
16182 -struct pv_mmu_ops pv_mmu_ops = {
16183 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16184
16185 .read_cr2 = native_read_cr2,
16186 .write_cr2 = native_write_cr2,
16187 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16188 .make_pud = PTE_IDENT,
16189
16190 .set_pgd = native_set_pgd,
16191 + .set_pgd_batched = native_set_pgd_batched,
16192 #endif
16193 #endif /* PAGETABLE_LEVELS >= 3 */
16194
16195 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16196 },
16197
16198 .set_fixmap = native_set_fixmap,
16199 +
16200 +#ifdef CONFIG_PAX_KERNEXEC
16201 + .pax_open_kernel = native_pax_open_kernel,
16202 + .pax_close_kernel = native_pax_close_kernel,
16203 +#endif
16204 +
16205 };
16206
16207 EXPORT_SYMBOL_GPL(pv_time_ops);
16208 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16209 index 35ccf75..7a15747 100644
16210 --- a/arch/x86/kernel/pci-iommu_table.c
16211 +++ b/arch/x86/kernel/pci-iommu_table.c
16212 @@ -2,7 +2,7 @@
16213 #include <asm/iommu_table.h>
16214 #include <linux/string.h>
16215 #include <linux/kallsyms.h>
16216 -
16217 +#include <linux/sched.h>
16218
16219 #define DEBUG 1
16220
16221 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16222 index ee5d4fb..426649b 100644
16223 --- a/arch/x86/kernel/process.c
16224 +++ b/arch/x86/kernel/process.c
16225 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16226
16227 void free_thread_info(struct thread_info *ti)
16228 {
16229 - free_thread_xstate(ti->task);
16230 free_pages((unsigned long)ti, THREAD_ORDER);
16231 }
16232
16233 +static struct kmem_cache *task_struct_cachep;
16234 +
16235 void arch_task_cache_init(void)
16236 {
16237 - task_xstate_cachep =
16238 - kmem_cache_create("task_xstate", xstate_size,
16239 + /* create a slab on which task_structs can be allocated */
16240 + task_struct_cachep =
16241 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16242 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16243 +
16244 + task_xstate_cachep =
16245 + kmem_cache_create("task_xstate", xstate_size,
16246 __alignof__(union thread_xstate),
16247 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16248 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16249 +}
16250 +
16251 +struct task_struct *alloc_task_struct_node(int node)
16252 +{
16253 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16254 +}
16255 +
16256 +void free_task_struct(struct task_struct *task)
16257 +{
16258 + free_thread_xstate(task);
16259 + kmem_cache_free(task_struct_cachep, task);
16260 }
16261
16262 /*
16263 @@ -70,7 +87,7 @@ void exit_thread(void)
16264 unsigned long *bp = t->io_bitmap_ptr;
16265
16266 if (bp) {
16267 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16268 + struct tss_struct *tss = init_tss + get_cpu();
16269
16270 t->io_bitmap_ptr = NULL;
16271 clear_thread_flag(TIF_IO_BITMAP);
16272 @@ -106,7 +123,7 @@ void show_regs_common(void)
16273
16274 printk(KERN_CONT "\n");
16275 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16276 - current->pid, current->comm, print_tainted(),
16277 + task_pid_nr(current), current->comm, print_tainted(),
16278 init_utsname()->release,
16279 (int)strcspn(init_utsname()->version, " "),
16280 init_utsname()->version);
16281 @@ -120,6 +137,9 @@ void flush_thread(void)
16282 {
16283 struct task_struct *tsk = current;
16284
16285 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16286 + loadsegment(gs, 0);
16287 +#endif
16288 flush_ptrace_hw_breakpoint(tsk);
16289 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16290 /*
16291 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16292 regs.di = (unsigned long) arg;
16293
16294 #ifdef CONFIG_X86_32
16295 - regs.ds = __USER_DS;
16296 - regs.es = __USER_DS;
16297 + regs.ds = __KERNEL_DS;
16298 + regs.es = __KERNEL_DS;
16299 regs.fs = __KERNEL_PERCPU;
16300 - regs.gs = __KERNEL_STACK_CANARY;
16301 + savesegment(gs, regs.gs);
16302 #else
16303 regs.ss = __KERNEL_DS;
16304 #endif
16305 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16306
16307 return ret;
16308 }
16309 -void stop_this_cpu(void *dummy)
16310 +__noreturn void stop_this_cpu(void *dummy)
16311 {
16312 local_irq_disable();
16313 /*
16314 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16315 }
16316 early_param("idle", idle_setup);
16317
16318 -unsigned long arch_align_stack(unsigned long sp)
16319 +#ifdef CONFIG_PAX_RANDKSTACK
16320 +void pax_randomize_kstack(struct pt_regs *regs)
16321 {
16322 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16323 - sp -= get_random_int() % 8192;
16324 - return sp & ~0xf;
16325 -}
16326 + struct thread_struct *thread = &current->thread;
16327 + unsigned long time;
16328
16329 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16330 -{
16331 - unsigned long range_end = mm->brk + 0x02000000;
16332 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16333 -}
16334 + if (!randomize_va_space)
16335 + return;
16336 +
16337 + if (v8086_mode(regs))
16338 + return;
16339
16340 + rdtscl(time);
16341 +
16342 + /* P4 seems to return a 0 LSB, ignore it */
16343 +#ifdef CONFIG_MPENTIUM4
16344 + time &= 0x3EUL;
16345 + time <<= 2;
16346 +#elif defined(CONFIG_X86_64)
16347 + time &= 0xFUL;
16348 + time <<= 4;
16349 +#else
16350 + time &= 0x1FUL;
16351 + time <<= 3;
16352 +#endif
16353 +
16354 + thread->sp0 ^= time;
16355 + load_sp0(init_tss + smp_processor_id(), thread);
16356 +
16357 +#ifdef CONFIG_X86_64
16358 + percpu_write(kernel_stack, thread->sp0);
16359 +#endif
16360 +}
16361 +#endif
16362 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16363 index 795b79f..063767a 100644
16364 --- a/arch/x86/kernel/process_32.c
16365 +++ b/arch/x86/kernel/process_32.c
16366 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16367 unsigned long thread_saved_pc(struct task_struct *tsk)
16368 {
16369 return ((unsigned long *)tsk->thread.sp)[3];
16370 +//XXX return tsk->thread.eip;
16371 }
16372
16373 #ifndef CONFIG_SMP
16374 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16375 unsigned long sp;
16376 unsigned short ss, gs;
16377
16378 - if (user_mode_vm(regs)) {
16379 + if (user_mode(regs)) {
16380 sp = regs->sp;
16381 ss = regs->ss & 0xffff;
16382 - gs = get_user_gs(regs);
16383 } else {
16384 sp = kernel_stack_pointer(regs);
16385 savesegment(ss, ss);
16386 - savesegment(gs, gs);
16387 }
16388 + gs = get_user_gs(regs);
16389
16390 show_regs_common();
16391
16392 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16393 struct task_struct *tsk;
16394 int err;
16395
16396 - childregs = task_pt_regs(p);
16397 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16398 *childregs = *regs;
16399 childregs->ax = 0;
16400 childregs->sp = sp;
16401
16402 p->thread.sp = (unsigned long) childregs;
16403 p->thread.sp0 = (unsigned long) (childregs+1);
16404 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16405
16406 p->thread.ip = (unsigned long) ret_from_fork;
16407
16408 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16409 struct thread_struct *prev = &prev_p->thread,
16410 *next = &next_p->thread;
16411 int cpu = smp_processor_id();
16412 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16413 + struct tss_struct *tss = init_tss + cpu;
16414 bool preload_fpu;
16415
16416 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16417 @@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16418 */
16419 lazy_save_gs(prev->gs);
16420
16421 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16422 + __set_fs(task_thread_info(next_p)->addr_limit);
16423 +#endif
16424 +
16425 /*
16426 * Load the per-thread Thread-Local Storage descriptor.
16427 */
16428 @@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16429 */
16430 arch_end_context_switch(next_p);
16431
16432 + percpu_write(current_task, next_p);
16433 + percpu_write(current_tinfo, &next_p->tinfo);
16434 +
16435 if (preload_fpu)
16436 __math_state_restore();
16437
16438 @@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16439 if (prev->gs | next->gs)
16440 lazy_load_gs(next->gs);
16441
16442 - percpu_write(current_task, next_p);
16443 -
16444 return prev_p;
16445 }
16446
16447 @@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16448 } while (count++ < 16);
16449 return 0;
16450 }
16451 -
16452 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16453 index 3bd7e6e..90b2bcf 100644
16454 --- a/arch/x86/kernel/process_64.c
16455 +++ b/arch/x86/kernel/process_64.c
16456 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16457 void exit_idle(void)
16458 {
16459 /* idle loop has pid 0 */
16460 - if (current->pid)
16461 + if (task_pid_nr(current))
16462 return;
16463 __exit_idle();
16464 }
16465 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16466 struct pt_regs *childregs;
16467 struct task_struct *me = current;
16468
16469 - childregs = ((struct pt_regs *)
16470 - (THREAD_SIZE + task_stack_page(p))) - 1;
16471 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16472 *childregs = *regs;
16473
16474 childregs->ax = 0;
16475 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16476 p->thread.sp = (unsigned long) childregs;
16477 p->thread.sp0 = (unsigned long) (childregs+1);
16478 p->thread.usersp = me->thread.usersp;
16479 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16480
16481 set_tsk_thread_flag(p, TIF_FORK);
16482
16483 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16484 struct thread_struct *prev = &prev_p->thread;
16485 struct thread_struct *next = &next_p->thread;
16486 int cpu = smp_processor_id();
16487 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16488 + struct tss_struct *tss = init_tss + cpu;
16489 unsigned fsindex, gsindex;
16490 bool preload_fpu;
16491
16492 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16493 prev->usersp = percpu_read(old_rsp);
16494 percpu_write(old_rsp, next->usersp);
16495 percpu_write(current_task, next_p);
16496 + percpu_write(current_tinfo, &next_p->tinfo);
16497
16498 - percpu_write(kernel_stack,
16499 - (unsigned long)task_stack_page(next_p) +
16500 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16501 + percpu_write(kernel_stack, next->sp0);
16502
16503 /*
16504 * Now maybe reload the debug registers and handle I/O bitmaps
16505 @@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16506 if (!p || p == current || p->state == TASK_RUNNING)
16507 return 0;
16508 stack = (unsigned long)task_stack_page(p);
16509 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16510 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16511 return 0;
16512 fp = *(u64 *)(p->thread.sp);
16513 do {
16514 - if (fp < (unsigned long)stack ||
16515 - fp >= (unsigned long)stack+THREAD_SIZE)
16516 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16517 return 0;
16518 ip = *(u64 *)(fp+8);
16519 if (!in_sched_functions(ip))
16520 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16521 index 8252879..d3219e0 100644
16522 --- a/arch/x86/kernel/ptrace.c
16523 +++ b/arch/x86/kernel/ptrace.c
16524 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16525 unsigned long addr, unsigned long data)
16526 {
16527 int ret;
16528 - unsigned long __user *datap = (unsigned long __user *)data;
16529 + unsigned long __user *datap = (__force unsigned long __user *)data;
16530
16531 switch (request) {
16532 /* read the word at location addr in the USER area. */
16533 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16534 if ((int) addr < 0)
16535 return -EIO;
16536 ret = do_get_thread_area(child, addr,
16537 - (struct user_desc __user *)data);
16538 + (__force struct user_desc __user *) data);
16539 break;
16540
16541 case PTRACE_SET_THREAD_AREA:
16542 if ((int) addr < 0)
16543 return -EIO;
16544 ret = do_set_thread_area(child, addr,
16545 - (struct user_desc __user *)data, 0);
16546 + (__force struct user_desc __user *) data, 0);
16547 break;
16548 #endif
16549
16550 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16551 memset(info, 0, sizeof(*info));
16552 info->si_signo = SIGTRAP;
16553 info->si_code = si_code;
16554 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16555 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16556 }
16557
16558 void user_single_step_siginfo(struct task_struct *tsk,
16559 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16560 index 42eb330..139955c 100644
16561 --- a/arch/x86/kernel/pvclock.c
16562 +++ b/arch/x86/kernel/pvclock.c
16563 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16564 return pv_tsc_khz;
16565 }
16566
16567 -static atomic64_t last_value = ATOMIC64_INIT(0);
16568 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16569
16570 void pvclock_resume(void)
16571 {
16572 - atomic64_set(&last_value, 0);
16573 + atomic64_set_unchecked(&last_value, 0);
16574 }
16575
16576 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16577 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16578 * updating at the same time, and one of them could be slightly behind,
16579 * making the assumption that last_value always go forward fail to hold.
16580 */
16581 - last = atomic64_read(&last_value);
16582 + last = atomic64_read_unchecked(&last_value);
16583 do {
16584 if (ret < last)
16585 return last;
16586 - last = atomic64_cmpxchg(&last_value, last, ret);
16587 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16588 } while (unlikely(last != ret));
16589
16590 return ret;
16591 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16592 index 37a458b..e63d183 100644
16593 --- a/arch/x86/kernel/reboot.c
16594 +++ b/arch/x86/kernel/reboot.c
16595 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16596 EXPORT_SYMBOL(pm_power_off);
16597
16598 static const struct desc_ptr no_idt = {};
16599 -static int reboot_mode;
16600 +static unsigned short reboot_mode;
16601 enum reboot_type reboot_type = BOOT_ACPI;
16602 int reboot_force;
16603
16604 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16605 extern const unsigned char machine_real_restart_asm[];
16606 extern const u64 machine_real_restart_gdt[3];
16607
16608 -void machine_real_restart(unsigned int type)
16609 +__noreturn void machine_real_restart(unsigned int type)
16610 {
16611 void *restart_va;
16612 unsigned long restart_pa;
16613 - void (*restart_lowmem)(unsigned int);
16614 + void (* __noreturn restart_lowmem)(unsigned int);
16615 u64 *lowmem_gdt;
16616
16617 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16618 + struct desc_struct *gdt;
16619 +#endif
16620 +
16621 local_irq_disable();
16622
16623 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16624 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16625 boot)". This seems like a fairly standard thing that gets set by
16626 REBOOT.COM programs, and the previous reset routine did this
16627 too. */
16628 - *((unsigned short *)0x472) = reboot_mode;
16629 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16630
16631 /* Patch the GDT in the low memory trampoline */
16632 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16633
16634 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16635 restart_pa = virt_to_phys(restart_va);
16636 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16637 + restart_lowmem = (void *)restart_pa;
16638
16639 /* GDT[0]: GDT self-pointer */
16640 lowmem_gdt[0] =
16641 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16642 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16643
16644 /* Jump to the identity-mapped low memory code */
16645 +
16646 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16647 + gdt = get_cpu_gdt_table(smp_processor_id());
16648 + pax_open_kernel();
16649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16650 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16651 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16652 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16653 +#endif
16654 +#ifdef CONFIG_PAX_KERNEXEC
16655 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16656 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16657 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16658 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16659 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16660 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16661 +#endif
16662 + pax_close_kernel();
16663 +#endif
16664 +
16665 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16666 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16667 + unreachable();
16668 +#else
16669 restart_lowmem(type);
16670 +#endif
16671 +
16672 }
16673 #ifdef CONFIG_APM_MODULE
16674 EXPORT_SYMBOL(machine_real_restart);
16675 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16676 * try to force a triple fault and then cycle between hitting the keyboard
16677 * controller and doing that
16678 */
16679 -static void native_machine_emergency_restart(void)
16680 +__noreturn static void native_machine_emergency_restart(void)
16681 {
16682 int i;
16683 int attempt = 0;
16684 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16685 #endif
16686 }
16687
16688 -static void __machine_emergency_restart(int emergency)
16689 +static __noreturn void __machine_emergency_restart(int emergency)
16690 {
16691 reboot_emergency = emergency;
16692 machine_ops.emergency_restart();
16693 }
16694
16695 -static void native_machine_restart(char *__unused)
16696 +static __noreturn void native_machine_restart(char *__unused)
16697 {
16698 printk("machine restart\n");
16699
16700 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16701 __machine_emergency_restart(0);
16702 }
16703
16704 -static void native_machine_halt(void)
16705 +static __noreturn void native_machine_halt(void)
16706 {
16707 /* stop other cpus and apics */
16708 machine_shutdown();
16709 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16710 stop_this_cpu(NULL);
16711 }
16712
16713 -static void native_machine_power_off(void)
16714 +__noreturn static void native_machine_power_off(void)
16715 {
16716 if (pm_power_off) {
16717 if (!reboot_force)
16718 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16719 }
16720 /* a fallback in case there is no PM info available */
16721 tboot_shutdown(TB_SHUTDOWN_HALT);
16722 + unreachable();
16723 }
16724
16725 struct machine_ops machine_ops = {
16726 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16727 index 7a6f3b3..bed145d7 100644
16728 --- a/arch/x86/kernel/relocate_kernel_64.S
16729 +++ b/arch/x86/kernel/relocate_kernel_64.S
16730 @@ -11,6 +11,7 @@
16731 #include <asm/kexec.h>
16732 #include <asm/processor-flags.h>
16733 #include <asm/pgtable_types.h>
16734 +#include <asm/alternative-asm.h>
16735
16736 /*
16737 * Must be relocatable PIC code callable as a C function
16738 @@ -160,13 +161,14 @@ identity_mapped:
16739 xorq %rbp, %rbp
16740 xorq %r8, %r8
16741 xorq %r9, %r9
16742 - xorq %r10, %r9
16743 + xorq %r10, %r10
16744 xorq %r11, %r11
16745 xorq %r12, %r12
16746 xorq %r13, %r13
16747 xorq %r14, %r14
16748 xorq %r15, %r15
16749
16750 + pax_force_retaddr 0, 1
16751 ret
16752
16753 1:
16754 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16755 index cf0ef98..e3f780b 100644
16756 --- a/arch/x86/kernel/setup.c
16757 +++ b/arch/x86/kernel/setup.c
16758 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16759
16760 switch (data->type) {
16761 case SETUP_E820_EXT:
16762 - parse_e820_ext(data);
16763 + parse_e820_ext((struct setup_data __force_kernel *)data);
16764 break;
16765 case SETUP_DTB:
16766 add_dtb(pa_data);
16767 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16768 * area (640->1Mb) as ram even though it is not.
16769 * take them out.
16770 */
16771 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16772 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16773 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16774 }
16775
16776 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16777
16778 if (!boot_params.hdr.root_flags)
16779 root_mountflags &= ~MS_RDONLY;
16780 - init_mm.start_code = (unsigned long) _text;
16781 - init_mm.end_code = (unsigned long) _etext;
16782 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16783 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16784 init_mm.end_data = (unsigned long) _edata;
16785 init_mm.brk = _brk_end;
16786
16787 - code_resource.start = virt_to_phys(_text);
16788 - code_resource.end = virt_to_phys(_etext)-1;
16789 - data_resource.start = virt_to_phys(_etext);
16790 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16791 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16792 + data_resource.start = virt_to_phys(_sdata);
16793 data_resource.end = virt_to_phys(_edata)-1;
16794 bss_resource.start = virt_to_phys(&__bss_start);
16795 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16796 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16797 index 71f4727..16dc9f7 100644
16798 --- a/arch/x86/kernel/setup_percpu.c
16799 +++ b/arch/x86/kernel/setup_percpu.c
16800 @@ -21,19 +21,17 @@
16801 #include <asm/cpu.h>
16802 #include <asm/stackprotector.h>
16803
16804 -DEFINE_PER_CPU(int, cpu_number);
16805 +#ifdef CONFIG_SMP
16806 +DEFINE_PER_CPU(unsigned int, cpu_number);
16807 EXPORT_PER_CPU_SYMBOL(cpu_number);
16808 +#endif
16809
16810 -#ifdef CONFIG_X86_64
16811 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16812 -#else
16813 -#define BOOT_PERCPU_OFFSET 0
16814 -#endif
16815
16816 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16817 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16818
16819 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16820 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16821 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16822 };
16823 EXPORT_SYMBOL(__per_cpu_offset);
16824 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16825 {
16826 #ifdef CONFIG_X86_32
16827 struct desc_struct gdt;
16828 + unsigned long base = per_cpu_offset(cpu);
16829
16830 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16831 - 0x2 | DESCTYPE_S, 0x8);
16832 - gdt.s = 1;
16833 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16834 + 0x83 | DESCTYPE_S, 0xC);
16835 write_gdt_entry(get_cpu_gdt_table(cpu),
16836 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16837 #endif
16838 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16839 /* alrighty, percpu areas up and running */
16840 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16841 for_each_possible_cpu(cpu) {
16842 +#ifdef CONFIG_CC_STACKPROTECTOR
16843 +#ifdef CONFIG_X86_32
16844 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16845 +#endif
16846 +#endif
16847 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16848 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16849 per_cpu(cpu_number, cpu) = cpu;
16850 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16851 */
16852 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16853 #endif
16854 +#ifdef CONFIG_CC_STACKPROTECTOR
16855 +#ifdef CONFIG_X86_32
16856 + if (!cpu)
16857 + per_cpu(stack_canary.canary, cpu) = canary;
16858 +#endif
16859 +#endif
16860 /*
16861 * Up to this point, the boot CPU has been using .init.data
16862 * area. Reload any changed state for the boot CPU.
16863 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16864 index 54ddaeb2..22c3bdc 100644
16865 --- a/arch/x86/kernel/signal.c
16866 +++ b/arch/x86/kernel/signal.c
16867 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16868 * Align the stack pointer according to the i386 ABI,
16869 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16870 */
16871 - sp = ((sp + 4) & -16ul) - 4;
16872 + sp = ((sp - 12) & -16ul) - 4;
16873 #else /* !CONFIG_X86_32 */
16874 sp = round_down(sp, 16) - 8;
16875 #endif
16876 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16877 * Return an always-bogus address instead so we will die with SIGSEGV.
16878 */
16879 if (onsigstack && !likely(on_sig_stack(sp)))
16880 - return (void __user *)-1L;
16881 + return (__force void __user *)-1L;
16882
16883 /* save i387 state */
16884 if (used_math() && save_i387_xstate(*fpstate) < 0)
16885 - return (void __user *)-1L;
16886 + return (__force void __user *)-1L;
16887
16888 return (void __user *)sp;
16889 }
16890 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16891 }
16892
16893 if (current->mm->context.vdso)
16894 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16895 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16896 else
16897 - restorer = &frame->retcode;
16898 + restorer = (void __user *)&frame->retcode;
16899 if (ka->sa.sa_flags & SA_RESTORER)
16900 restorer = ka->sa.sa_restorer;
16901
16902 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16903 * reasons and because gdb uses it as a signature to notice
16904 * signal handler stack frames.
16905 */
16906 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16907 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16908
16909 if (err)
16910 return -EFAULT;
16911 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16912 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16913
16914 /* Set up to return from userspace. */
16915 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16916 + if (current->mm->context.vdso)
16917 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16918 + else
16919 + restorer = (void __user *)&frame->retcode;
16920 if (ka->sa.sa_flags & SA_RESTORER)
16921 restorer = ka->sa.sa_restorer;
16922 put_user_ex(restorer, &frame->pretcode);
16923 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16924 * reasons and because gdb uses it as a signature to notice
16925 * signal handler stack frames.
16926 */
16927 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16928 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16929 } put_user_catch(err);
16930
16931 if (err)
16932 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16933 * X86_32: vm86 regs switched out by assembly code before reaching
16934 * here, so testing against kernel CS suffices.
16935 */
16936 - if (!user_mode(regs))
16937 + if (!user_mode_novm(regs))
16938 return;
16939
16940 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16941 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16942 index 9f548cb..caf76f7 100644
16943 --- a/arch/x86/kernel/smpboot.c
16944 +++ b/arch/x86/kernel/smpboot.c
16945 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16946 set_idle_for_cpu(cpu, c_idle.idle);
16947 do_rest:
16948 per_cpu(current_task, cpu) = c_idle.idle;
16949 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16950 #ifdef CONFIG_X86_32
16951 /* Stack for startup_32 can be just as for start_secondary onwards */
16952 irq_ctx_init(cpu);
16953 #else
16954 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16955 initial_gs = per_cpu_offset(cpu);
16956 - per_cpu(kernel_stack, cpu) =
16957 - (unsigned long)task_stack_page(c_idle.idle) -
16958 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16959 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16960 #endif
16961 +
16962 + pax_open_kernel();
16963 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16964 + pax_close_kernel();
16965 +
16966 initial_code = (unsigned long)start_secondary;
16967 stack_start = c_idle.idle->thread.sp;
16968
16969 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16970
16971 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16972
16973 +#ifdef CONFIG_PAX_PER_CPU_PGD
16974 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16975 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16976 + KERNEL_PGD_PTRS);
16977 +#endif
16978 +
16979 err = do_boot_cpu(apicid, cpu);
16980 if (err) {
16981 pr_debug("do_boot_cpu failed %d\n", err);
16982 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16983 index c346d11..d43b163 100644
16984 --- a/arch/x86/kernel/step.c
16985 +++ b/arch/x86/kernel/step.c
16986 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16987 struct desc_struct *desc;
16988 unsigned long base;
16989
16990 - seg &= ~7UL;
16991 + seg >>= 3;
16992
16993 mutex_lock(&child->mm->context.lock);
16994 - if (unlikely((seg >> 3) >= child->mm->context.size))
16995 + if (unlikely(seg >= child->mm->context.size))
16996 addr = -1L; /* bogus selector, access would fault */
16997 else {
16998 desc = child->mm->context.ldt + seg;
16999 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17000 addr += base;
17001 }
17002 mutex_unlock(&child->mm->context.lock);
17003 - }
17004 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17005 + addr = ktla_ktva(addr);
17006
17007 return addr;
17008 }
17009 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17010 unsigned char opcode[15];
17011 unsigned long addr = convert_ip_to_linear(child, regs);
17012
17013 + if (addr == -EINVAL)
17014 + return 0;
17015 +
17016 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17017 for (i = 0; i < copied; i++) {
17018 switch (opcode[i]) {
17019 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17020 index 0b0cb5f..db6b9ed 100644
17021 --- a/arch/x86/kernel/sys_i386_32.c
17022 +++ b/arch/x86/kernel/sys_i386_32.c
17023 @@ -24,17 +24,224 @@
17024
17025 #include <asm/syscalls.h>
17026
17027 -/*
17028 - * Do a system call from kernel instead of calling sys_execve so we
17029 - * end up with proper pt_regs.
17030 - */
17031 -int kernel_execve(const char *filename,
17032 - const char *const argv[],
17033 - const char *const envp[])
17034 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17035 {
17036 - long __res;
17037 - asm volatile ("int $0x80"
17038 - : "=a" (__res)
17039 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17040 - return __res;
17041 + unsigned long pax_task_size = TASK_SIZE;
17042 +
17043 +#ifdef CONFIG_PAX_SEGMEXEC
17044 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17045 + pax_task_size = SEGMEXEC_TASK_SIZE;
17046 +#endif
17047 +
17048 + if (len > pax_task_size || addr > pax_task_size - len)
17049 + return -EINVAL;
17050 +
17051 + return 0;
17052 +}
17053 +
17054 +unsigned long
17055 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17056 + unsigned long len, unsigned long pgoff, unsigned long flags)
17057 +{
17058 + struct mm_struct *mm = current->mm;
17059 + struct vm_area_struct *vma;
17060 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17061 +
17062 +#ifdef CONFIG_PAX_SEGMEXEC
17063 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17064 + pax_task_size = SEGMEXEC_TASK_SIZE;
17065 +#endif
17066 +
17067 + pax_task_size -= PAGE_SIZE;
17068 +
17069 + if (len > pax_task_size)
17070 + return -ENOMEM;
17071 +
17072 + if (flags & MAP_FIXED)
17073 + return addr;
17074 +
17075 +#ifdef CONFIG_PAX_RANDMMAP
17076 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17077 +#endif
17078 +
17079 + if (addr) {
17080 + addr = PAGE_ALIGN(addr);
17081 + if (pax_task_size - len >= addr) {
17082 + vma = find_vma(mm, addr);
17083 + if (check_heap_stack_gap(vma, addr, len))
17084 + return addr;
17085 + }
17086 + }
17087 + if (len > mm->cached_hole_size) {
17088 + start_addr = addr = mm->free_area_cache;
17089 + } else {
17090 + start_addr = addr = mm->mmap_base;
17091 + mm->cached_hole_size = 0;
17092 + }
17093 +
17094 +#ifdef CONFIG_PAX_PAGEEXEC
17095 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17096 + start_addr = 0x00110000UL;
17097 +
17098 +#ifdef CONFIG_PAX_RANDMMAP
17099 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17100 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17101 +#endif
17102 +
17103 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17104 + start_addr = addr = mm->mmap_base;
17105 + else
17106 + addr = start_addr;
17107 + }
17108 +#endif
17109 +
17110 +full_search:
17111 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17112 + /* At this point: (!vma || addr < vma->vm_end). */
17113 + if (pax_task_size - len < addr) {
17114 + /*
17115 + * Start a new search - just in case we missed
17116 + * some holes.
17117 + */
17118 + if (start_addr != mm->mmap_base) {
17119 + start_addr = addr = mm->mmap_base;
17120 + mm->cached_hole_size = 0;
17121 + goto full_search;
17122 + }
17123 + return -ENOMEM;
17124 + }
17125 + if (check_heap_stack_gap(vma, addr, len))
17126 + break;
17127 + if (addr + mm->cached_hole_size < vma->vm_start)
17128 + mm->cached_hole_size = vma->vm_start - addr;
17129 + addr = vma->vm_end;
17130 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17131 + start_addr = addr = mm->mmap_base;
17132 + mm->cached_hole_size = 0;
17133 + goto full_search;
17134 + }
17135 + }
17136 +
17137 + /*
17138 + * Remember the place where we stopped the search:
17139 + */
17140 + mm->free_area_cache = addr + len;
17141 + return addr;
17142 +}
17143 +
17144 +unsigned long
17145 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17146 + const unsigned long len, const unsigned long pgoff,
17147 + const unsigned long flags)
17148 +{
17149 + struct vm_area_struct *vma;
17150 + struct mm_struct *mm = current->mm;
17151 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17152 +
17153 +#ifdef CONFIG_PAX_SEGMEXEC
17154 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17155 + pax_task_size = SEGMEXEC_TASK_SIZE;
17156 +#endif
17157 +
17158 + pax_task_size -= PAGE_SIZE;
17159 +
17160 + /* requested length too big for entire address space */
17161 + if (len > pax_task_size)
17162 + return -ENOMEM;
17163 +
17164 + if (flags & MAP_FIXED)
17165 + return addr;
17166 +
17167 +#ifdef CONFIG_PAX_PAGEEXEC
17168 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17169 + goto bottomup;
17170 +#endif
17171 +
17172 +#ifdef CONFIG_PAX_RANDMMAP
17173 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17174 +#endif
17175 +
17176 + /* requesting a specific address */
17177 + if (addr) {
17178 + addr = PAGE_ALIGN(addr);
17179 + if (pax_task_size - len >= addr) {
17180 + vma = find_vma(mm, addr);
17181 + if (check_heap_stack_gap(vma, addr, len))
17182 + return addr;
17183 + }
17184 + }
17185 +
17186 + /* check if free_area_cache is useful for us */
17187 + if (len <= mm->cached_hole_size) {
17188 + mm->cached_hole_size = 0;
17189 + mm->free_area_cache = mm->mmap_base;
17190 + }
17191 +
17192 + /* either no address requested or can't fit in requested address hole */
17193 + addr = mm->free_area_cache;
17194 +
17195 + /* make sure it can fit in the remaining address space */
17196 + if (addr > len) {
17197 + vma = find_vma(mm, addr-len);
17198 + if (check_heap_stack_gap(vma, addr - len, len))
17199 + /* remember the address as a hint for next time */
17200 + return (mm->free_area_cache = addr-len);
17201 + }
17202 +
17203 + if (mm->mmap_base < len)
17204 + goto bottomup;
17205 +
17206 + addr = mm->mmap_base-len;
17207 +
17208 + do {
17209 + /*
17210 + * Lookup failure means no vma is above this address,
17211 + * else if new region fits below vma->vm_start,
17212 + * return with success:
17213 + */
17214 + vma = find_vma(mm, addr);
17215 + if (check_heap_stack_gap(vma, addr, len))
17216 + /* remember the address as a hint for next time */
17217 + return (mm->free_area_cache = addr);
17218 +
17219 + /* remember the largest hole we saw so far */
17220 + if (addr + mm->cached_hole_size < vma->vm_start)
17221 + mm->cached_hole_size = vma->vm_start - addr;
17222 +
17223 + /* try just below the current vma->vm_start */
17224 + addr = skip_heap_stack_gap(vma, len);
17225 + } while (!IS_ERR_VALUE(addr));
17226 +
17227 +bottomup:
17228 + /*
17229 + * A failed mmap() very likely causes application failure,
17230 + * so fall back to the bottom-up function here. This scenario
17231 + * can happen with large stack limits and large mmap()
17232 + * allocations.
17233 + */
17234 +
17235 +#ifdef CONFIG_PAX_SEGMEXEC
17236 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17237 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17238 + else
17239 +#endif
17240 +
17241 + mm->mmap_base = TASK_UNMAPPED_BASE;
17242 +
17243 +#ifdef CONFIG_PAX_RANDMMAP
17244 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17245 + mm->mmap_base += mm->delta_mmap;
17246 +#endif
17247 +
17248 + mm->free_area_cache = mm->mmap_base;
17249 + mm->cached_hole_size = ~0UL;
17250 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17251 + /*
17252 + * Restore the topdown base:
17253 + */
17254 + mm->mmap_base = base;
17255 + mm->free_area_cache = base;
17256 + mm->cached_hole_size = ~0UL;
17257 +
17258 + return addr;
17259 }
17260 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17261 index 0514890..3dbebce 100644
17262 --- a/arch/x86/kernel/sys_x86_64.c
17263 +++ b/arch/x86/kernel/sys_x86_64.c
17264 @@ -95,8 +95,8 @@ out:
17265 return error;
17266 }
17267
17268 -static void find_start_end(unsigned long flags, unsigned long *begin,
17269 - unsigned long *end)
17270 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17271 + unsigned long *begin, unsigned long *end)
17272 {
17273 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17274 unsigned long new_begin;
17275 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17276 *begin = new_begin;
17277 }
17278 } else {
17279 - *begin = TASK_UNMAPPED_BASE;
17280 + *begin = mm->mmap_base;
17281 *end = TASK_SIZE;
17282 }
17283 }
17284 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17285 if (flags & MAP_FIXED)
17286 return addr;
17287
17288 - find_start_end(flags, &begin, &end);
17289 + find_start_end(mm, flags, &begin, &end);
17290
17291 if (len > end)
17292 return -ENOMEM;
17293
17294 +#ifdef CONFIG_PAX_RANDMMAP
17295 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17296 +#endif
17297 +
17298 if (addr) {
17299 addr = PAGE_ALIGN(addr);
17300 vma = find_vma(mm, addr);
17301 - if (end - len >= addr &&
17302 - (!vma || addr + len <= vma->vm_start))
17303 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17304 return addr;
17305 }
17306 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17307 @@ -172,7 +175,7 @@ full_search:
17308 }
17309 return -ENOMEM;
17310 }
17311 - if (!vma || addr + len <= vma->vm_start) {
17312 + if (check_heap_stack_gap(vma, addr, len)) {
17313 /*
17314 * Remember the place where we stopped the search:
17315 */
17316 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17317 {
17318 struct vm_area_struct *vma;
17319 struct mm_struct *mm = current->mm;
17320 - unsigned long addr = addr0;
17321 + unsigned long base = mm->mmap_base, addr = addr0;
17322
17323 /* requested length too big for entire address space */
17324 if (len > TASK_SIZE)
17325 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17326 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17327 goto bottomup;
17328
17329 +#ifdef CONFIG_PAX_RANDMMAP
17330 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17331 +#endif
17332 +
17333 /* requesting a specific address */
17334 if (addr) {
17335 addr = PAGE_ALIGN(addr);
17336 - vma = find_vma(mm, addr);
17337 - if (TASK_SIZE - len >= addr &&
17338 - (!vma || addr + len <= vma->vm_start))
17339 - return addr;
17340 + if (TASK_SIZE - len >= addr) {
17341 + vma = find_vma(mm, addr);
17342 + if (check_heap_stack_gap(vma, addr, len))
17343 + return addr;
17344 + }
17345 }
17346
17347 /* check if free_area_cache is useful for us */
17348 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17349 ALIGN_TOPDOWN);
17350
17351 vma = find_vma(mm, tmp_addr);
17352 - if (!vma || tmp_addr + len <= vma->vm_start)
17353 + if (check_heap_stack_gap(vma, tmp_addr, len))
17354 /* remember the address as a hint for next time */
17355 return mm->free_area_cache = tmp_addr;
17356 }
17357 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17358 * return with success:
17359 */
17360 vma = find_vma(mm, addr);
17361 - if (!vma || addr+len <= vma->vm_start)
17362 + if (check_heap_stack_gap(vma, addr, len))
17363 /* remember the address as a hint for next time */
17364 return mm->free_area_cache = addr;
17365
17366 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17367 mm->cached_hole_size = vma->vm_start - addr;
17368
17369 /* try just below the current vma->vm_start */
17370 - addr = vma->vm_start-len;
17371 - } while (len < vma->vm_start);
17372 + addr = skip_heap_stack_gap(vma, len);
17373 + } while (!IS_ERR_VALUE(addr));
17374
17375 bottomup:
17376 /*
17377 @@ -270,13 +278,21 @@ bottomup:
17378 * can happen with large stack limits and large mmap()
17379 * allocations.
17380 */
17381 + mm->mmap_base = TASK_UNMAPPED_BASE;
17382 +
17383 +#ifdef CONFIG_PAX_RANDMMAP
17384 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17385 + mm->mmap_base += mm->delta_mmap;
17386 +#endif
17387 +
17388 + mm->free_area_cache = mm->mmap_base;
17389 mm->cached_hole_size = ~0UL;
17390 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17391 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17392 /*
17393 * Restore the topdown base:
17394 */
17395 - mm->free_area_cache = mm->mmap_base;
17396 + mm->mmap_base = base;
17397 + mm->free_area_cache = base;
17398 mm->cached_hole_size = ~0UL;
17399
17400 return addr;
17401 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17402 index 9a0e312..e6f66f2 100644
17403 --- a/arch/x86/kernel/syscall_table_32.S
17404 +++ b/arch/x86/kernel/syscall_table_32.S
17405 @@ -1,3 +1,4 @@
17406 +.section .rodata,"a",@progbits
17407 ENTRY(sys_call_table)
17408 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17409 .long sys_exit
17410 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17411 index e2410e2..4fe3fbc 100644
17412 --- a/arch/x86/kernel/tboot.c
17413 +++ b/arch/x86/kernel/tboot.c
17414 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17415
17416 void tboot_shutdown(u32 shutdown_type)
17417 {
17418 - void (*shutdown)(void);
17419 + void (* __noreturn shutdown)(void);
17420
17421 if (!tboot_enabled())
17422 return;
17423 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17424
17425 switch_to_tboot_pt();
17426
17427 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17428 + shutdown = (void *)tboot->shutdown_entry;
17429 shutdown();
17430
17431 /* should not reach here */
17432 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17433 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17434 }
17435
17436 -static atomic_t ap_wfs_count;
17437 +static atomic_unchecked_t ap_wfs_count;
17438
17439 static int tboot_wait_for_aps(int num_aps)
17440 {
17441 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17442 {
17443 switch (action) {
17444 case CPU_DYING:
17445 - atomic_inc(&ap_wfs_count);
17446 + atomic_inc_unchecked(&ap_wfs_count);
17447 if (num_online_cpus() == 1)
17448 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17449 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17450 return NOTIFY_BAD;
17451 break;
17452 }
17453 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17454
17455 tboot_create_trampoline();
17456
17457 - atomic_set(&ap_wfs_count, 0);
17458 + atomic_set_unchecked(&ap_wfs_count, 0);
17459 register_hotcpu_notifier(&tboot_cpu_notifier);
17460 return 0;
17461 }
17462 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17463 index dd5fbf4..b7f2232 100644
17464 --- a/arch/x86/kernel/time.c
17465 +++ b/arch/x86/kernel/time.c
17466 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17467 {
17468 unsigned long pc = instruction_pointer(regs);
17469
17470 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17471 + if (!user_mode(regs) && in_lock_functions(pc)) {
17472 #ifdef CONFIG_FRAME_POINTER
17473 - return *(unsigned long *)(regs->bp + sizeof(long));
17474 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17475 #else
17476 unsigned long *sp =
17477 (unsigned long *)kernel_stack_pointer(regs);
17478 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17479 * or above a saved flags. Eflags has bits 22-31 zero,
17480 * kernel addresses don't.
17481 */
17482 +
17483 +#ifdef CONFIG_PAX_KERNEXEC
17484 + return ktla_ktva(sp[0]);
17485 +#else
17486 if (sp[0] >> 22)
17487 return sp[0];
17488 if (sp[1] >> 22)
17489 return sp[1];
17490 #endif
17491 +
17492 +#endif
17493 }
17494 return pc;
17495 }
17496 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17497 index 6bb7b85..dd853e1 100644
17498 --- a/arch/x86/kernel/tls.c
17499 +++ b/arch/x86/kernel/tls.c
17500 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17501 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17502 return -EINVAL;
17503
17504 +#ifdef CONFIG_PAX_SEGMEXEC
17505 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17506 + return -EINVAL;
17507 +#endif
17508 +
17509 set_tls_desc(p, idx, &info, 1);
17510
17511 return 0;
17512 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17513 index 451c0a7..e57f551 100644
17514 --- a/arch/x86/kernel/trampoline_32.S
17515 +++ b/arch/x86/kernel/trampoline_32.S
17516 @@ -32,6 +32,12 @@
17517 #include <asm/segment.h>
17518 #include <asm/page_types.h>
17519
17520 +#ifdef CONFIG_PAX_KERNEXEC
17521 +#define ta(X) (X)
17522 +#else
17523 +#define ta(X) ((X) - __PAGE_OFFSET)
17524 +#endif
17525 +
17526 #ifdef CONFIG_SMP
17527
17528 .section ".x86_trampoline","a"
17529 @@ -62,7 +68,7 @@ r_base = .
17530 inc %ax # protected mode (PE) bit
17531 lmsw %ax # into protected mode
17532 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17533 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17534 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17535
17536 # These need to be in the same 64K segment as the above;
17537 # hence we don't use the boot_gdt_descr defined in head.S
17538 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17539 index 09ff517..df19fbff 100644
17540 --- a/arch/x86/kernel/trampoline_64.S
17541 +++ b/arch/x86/kernel/trampoline_64.S
17542 @@ -90,7 +90,7 @@ startup_32:
17543 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17544 movl %eax, %ds
17545
17546 - movl $X86_CR4_PAE, %eax
17547 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17548 movl %eax, %cr4 # Enable PAE mode
17549
17550 # Setup trampoline 4 level pagetables
17551 @@ -138,7 +138,7 @@ tidt:
17552 # so the kernel can live anywhere
17553 .balign 4
17554 tgdt:
17555 - .short tgdt_end - tgdt # gdt limit
17556 + .short tgdt_end - tgdt - 1 # gdt limit
17557 .long tgdt - r_base
17558 .short 0
17559 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17560 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17561 index a8e3eb8..c9dbd7d 100644
17562 --- a/arch/x86/kernel/traps.c
17563 +++ b/arch/x86/kernel/traps.c
17564 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17565
17566 /* Do we ignore FPU interrupts ? */
17567 char ignore_fpu_irq;
17568 -
17569 -/*
17570 - * The IDT has to be page-aligned to simplify the Pentium
17571 - * F0 0F bug workaround.
17572 - */
17573 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17574 #endif
17575
17576 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17577 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17578 }
17579
17580 static void __kprobes
17581 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17582 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17583 long error_code, siginfo_t *info)
17584 {
17585 struct task_struct *tsk = current;
17586
17587 #ifdef CONFIG_X86_32
17588 - if (regs->flags & X86_VM_MASK) {
17589 + if (v8086_mode(regs)) {
17590 /*
17591 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17592 * On nmi (interrupt 2), do_trap should not be called.
17593 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17594 }
17595 #endif
17596
17597 - if (!user_mode(regs))
17598 + if (!user_mode_novm(regs))
17599 goto kernel_trap;
17600
17601 #ifdef CONFIG_X86_32
17602 @@ -148,7 +142,7 @@ trap_signal:
17603 printk_ratelimit()) {
17604 printk(KERN_INFO
17605 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17606 - tsk->comm, tsk->pid, str,
17607 + tsk->comm, task_pid_nr(tsk), str,
17608 regs->ip, regs->sp, error_code);
17609 print_vma_addr(" in ", regs->ip);
17610 printk("\n");
17611 @@ -165,8 +159,20 @@ kernel_trap:
17612 if (!fixup_exception(regs)) {
17613 tsk->thread.error_code = error_code;
17614 tsk->thread.trap_no = trapnr;
17615 +
17616 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17617 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17618 + str = "PAX: suspicious stack segment fault";
17619 +#endif
17620 +
17621 die(str, regs, error_code);
17622 }
17623 +
17624 +#ifdef CONFIG_PAX_REFCOUNT
17625 + if (trapnr == 4)
17626 + pax_report_refcount_overflow(regs);
17627 +#endif
17628 +
17629 return;
17630
17631 #ifdef CONFIG_X86_32
17632 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17633 conditional_sti(regs);
17634
17635 #ifdef CONFIG_X86_32
17636 - if (regs->flags & X86_VM_MASK)
17637 + if (v8086_mode(regs))
17638 goto gp_in_vm86;
17639 #endif
17640
17641 tsk = current;
17642 - if (!user_mode(regs))
17643 + if (!user_mode_novm(regs))
17644 goto gp_in_kernel;
17645
17646 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17647 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17648 + struct mm_struct *mm = tsk->mm;
17649 + unsigned long limit;
17650 +
17651 + down_write(&mm->mmap_sem);
17652 + limit = mm->context.user_cs_limit;
17653 + if (limit < TASK_SIZE) {
17654 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17655 + up_write(&mm->mmap_sem);
17656 + return;
17657 + }
17658 + up_write(&mm->mmap_sem);
17659 + }
17660 +#endif
17661 +
17662 tsk->thread.error_code = error_code;
17663 tsk->thread.trap_no = 13;
17664
17665 @@ -295,6 +317,13 @@ gp_in_kernel:
17666 if (notify_die(DIE_GPF, "general protection fault", regs,
17667 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17668 return;
17669 +
17670 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17671 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17672 + die("PAX: suspicious general protection fault", regs, error_code);
17673 + else
17674 +#endif
17675 +
17676 die("general protection fault", regs, error_code);
17677 }
17678
17679 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17680 /* It's safe to allow irq's after DR6 has been saved */
17681 preempt_conditional_sti(regs);
17682
17683 - if (regs->flags & X86_VM_MASK) {
17684 + if (v8086_mode(regs)) {
17685 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17686 error_code, 1);
17687 preempt_conditional_cli(regs);
17688 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17689 * We already checked v86 mode above, so we can check for kernel mode
17690 * by just checking the CPL of CS.
17691 */
17692 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17693 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17694 tsk->thread.debugreg6 &= ~DR_STEP;
17695 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17696 regs->flags &= ~X86_EFLAGS_TF;
17697 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17698 return;
17699 conditional_sti(regs);
17700
17701 - if (!user_mode_vm(regs))
17702 + if (!user_mode(regs))
17703 {
17704 if (!fixup_exception(regs)) {
17705 task->thread.error_code = error_code;
17706 @@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17707 void __math_state_restore(void)
17708 {
17709 struct thread_info *thread = current_thread_info();
17710 - struct task_struct *tsk = thread->task;
17711 + struct task_struct *tsk = current;
17712
17713 /*
17714 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17715 @@ -595,8 +624,7 @@ void __math_state_restore(void)
17716 */
17717 asmlinkage void math_state_restore(void)
17718 {
17719 - struct thread_info *thread = current_thread_info();
17720 - struct task_struct *tsk = thread->task;
17721 + struct task_struct *tsk = current;
17722
17723 if (!tsk_used_math(tsk)) {
17724 local_irq_enable();
17725 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17726 index b9242ba..50c5edd 100644
17727 --- a/arch/x86/kernel/verify_cpu.S
17728 +++ b/arch/x86/kernel/verify_cpu.S
17729 @@ -20,6 +20,7 @@
17730 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17731 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17732 * arch/x86/kernel/head_32.S: processor startup
17733 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17734 *
17735 * verify_cpu, returns the status of longmode and SSE in register %eax.
17736 * 0: Success 1: Failure
17737 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17738 index 863f875..4307295 100644
17739 --- a/arch/x86/kernel/vm86_32.c
17740 +++ b/arch/x86/kernel/vm86_32.c
17741 @@ -41,6 +41,7 @@
17742 #include <linux/ptrace.h>
17743 #include <linux/audit.h>
17744 #include <linux/stddef.h>
17745 +#include <linux/grsecurity.h>
17746
17747 #include <asm/uaccess.h>
17748 #include <asm/io.h>
17749 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17750 do_exit(SIGSEGV);
17751 }
17752
17753 - tss = &per_cpu(init_tss, get_cpu());
17754 + tss = init_tss + get_cpu();
17755 current->thread.sp0 = current->thread.saved_sp0;
17756 current->thread.sysenter_cs = __KERNEL_CS;
17757 load_sp0(tss, &current->thread);
17758 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17759 struct task_struct *tsk;
17760 int tmp, ret = -EPERM;
17761
17762 +#ifdef CONFIG_GRKERNSEC_VM86
17763 + if (!capable(CAP_SYS_RAWIO)) {
17764 + gr_handle_vm86();
17765 + goto out;
17766 + }
17767 +#endif
17768 +
17769 tsk = current;
17770 if (tsk->thread.saved_sp0)
17771 goto out;
17772 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17773 int tmp, ret;
17774 struct vm86plus_struct __user *v86;
17775
17776 +#ifdef CONFIG_GRKERNSEC_VM86
17777 + if (!capable(CAP_SYS_RAWIO)) {
17778 + gr_handle_vm86();
17779 + ret = -EPERM;
17780 + goto out;
17781 + }
17782 +#endif
17783 +
17784 tsk = current;
17785 switch (cmd) {
17786 case VM86_REQUEST_IRQ:
17787 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17788 tsk->thread.saved_fs = info->regs32->fs;
17789 tsk->thread.saved_gs = get_user_gs(info->regs32);
17790
17791 - tss = &per_cpu(init_tss, get_cpu());
17792 + tss = init_tss + get_cpu();
17793 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17794 if (cpu_has_sep)
17795 tsk->thread.sysenter_cs = 0;
17796 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17797 goto cannot_handle;
17798 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17799 goto cannot_handle;
17800 - intr_ptr = (unsigned long __user *) (i << 2);
17801 + intr_ptr = (__force unsigned long __user *) (i << 2);
17802 if (get_user(segoffs, intr_ptr))
17803 goto cannot_handle;
17804 if ((segoffs >> 16) == BIOSSEG)
17805 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17806 index 0f703f1..9e15f64 100644
17807 --- a/arch/x86/kernel/vmlinux.lds.S
17808 +++ b/arch/x86/kernel/vmlinux.lds.S
17809 @@ -26,6 +26,13 @@
17810 #include <asm/page_types.h>
17811 #include <asm/cache.h>
17812 #include <asm/boot.h>
17813 +#include <asm/segment.h>
17814 +
17815 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17816 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17817 +#else
17818 +#define __KERNEL_TEXT_OFFSET 0
17819 +#endif
17820
17821 #undef i386 /* in case the preprocessor is a 32bit one */
17822
17823 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17824
17825 PHDRS {
17826 text PT_LOAD FLAGS(5); /* R_E */
17827 +#ifdef CONFIG_X86_32
17828 + module PT_LOAD FLAGS(5); /* R_E */
17829 +#endif
17830 +#ifdef CONFIG_XEN
17831 + rodata PT_LOAD FLAGS(5); /* R_E */
17832 +#else
17833 + rodata PT_LOAD FLAGS(4); /* R__ */
17834 +#endif
17835 data PT_LOAD FLAGS(6); /* RW_ */
17836 -#ifdef CONFIG_X86_64
17837 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17838 #ifdef CONFIG_SMP
17839 percpu PT_LOAD FLAGS(6); /* RW_ */
17840 #endif
17841 + text.init PT_LOAD FLAGS(5); /* R_E */
17842 + text.exit PT_LOAD FLAGS(5); /* R_E */
17843 init PT_LOAD FLAGS(7); /* RWE */
17844 -#endif
17845 note PT_NOTE FLAGS(0); /* ___ */
17846 }
17847
17848 SECTIONS
17849 {
17850 #ifdef CONFIG_X86_32
17851 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17852 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17853 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17854 #else
17855 - . = __START_KERNEL;
17856 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17857 + . = __START_KERNEL;
17858 #endif
17859
17860 /* Text and read-only data */
17861 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17862 - _text = .;
17863 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17864 /* bootstrapping code */
17865 +#ifdef CONFIG_X86_32
17866 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17867 +#else
17868 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17869 +#endif
17870 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17871 + _text = .;
17872 HEAD_TEXT
17873 #ifdef CONFIG_X86_32
17874 . = ALIGN(PAGE_SIZE);
17875 @@ -108,13 +128,47 @@ SECTIONS
17876 IRQENTRY_TEXT
17877 *(.fixup)
17878 *(.gnu.warning)
17879 - /* End of text section */
17880 - _etext = .;
17881 } :text = 0x9090
17882
17883 - NOTES :text :note
17884 + . += __KERNEL_TEXT_OFFSET;
17885
17886 - EXCEPTION_TABLE(16) :text = 0x9090
17887 +#ifdef CONFIG_X86_32
17888 + . = ALIGN(PAGE_SIZE);
17889 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17890 +
17891 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17892 + MODULES_EXEC_VADDR = .;
17893 + BYTE(0)
17894 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17895 + . = ALIGN(HPAGE_SIZE);
17896 + MODULES_EXEC_END = . - 1;
17897 +#endif
17898 +
17899 + } :module
17900 +#endif
17901 +
17902 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17903 + /* End of text section */
17904 + _etext = . - __KERNEL_TEXT_OFFSET;
17905 + }
17906 +
17907 +#ifdef CONFIG_X86_32
17908 + . = ALIGN(PAGE_SIZE);
17909 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17910 + *(.idt)
17911 + . = ALIGN(PAGE_SIZE);
17912 + *(.empty_zero_page)
17913 + *(.initial_pg_fixmap)
17914 + *(.initial_pg_pmd)
17915 + *(.initial_page_table)
17916 + *(.swapper_pg_dir)
17917 + } :rodata
17918 +#endif
17919 +
17920 + . = ALIGN(PAGE_SIZE);
17921 + NOTES :rodata :note
17922 +
17923 + EXCEPTION_TABLE(16) :rodata
17924
17925 #if defined(CONFIG_DEBUG_RODATA)
17926 /* .text should occupy whole number of pages */
17927 @@ -126,16 +180,20 @@ SECTIONS
17928
17929 /* Data */
17930 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17931 +
17932 +#ifdef CONFIG_PAX_KERNEXEC
17933 + . = ALIGN(HPAGE_SIZE);
17934 +#else
17935 + . = ALIGN(PAGE_SIZE);
17936 +#endif
17937 +
17938 /* Start of data section */
17939 _sdata = .;
17940
17941 /* init_task */
17942 INIT_TASK_DATA(THREAD_SIZE)
17943
17944 -#ifdef CONFIG_X86_32
17945 - /* 32 bit has nosave before _edata */
17946 NOSAVE_DATA
17947 -#endif
17948
17949 PAGE_ALIGNED_DATA(PAGE_SIZE)
17950
17951 @@ -176,12 +234,19 @@ SECTIONS
17952 #endif /* CONFIG_X86_64 */
17953
17954 /* Init code and data - will be freed after init */
17955 - . = ALIGN(PAGE_SIZE);
17956 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17957 + BYTE(0)
17958 +
17959 +#ifdef CONFIG_PAX_KERNEXEC
17960 + . = ALIGN(HPAGE_SIZE);
17961 +#else
17962 + . = ALIGN(PAGE_SIZE);
17963 +#endif
17964 +
17965 __init_begin = .; /* paired with __init_end */
17966 - }
17967 + } :init.begin
17968
17969 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17970 +#ifdef CONFIG_SMP
17971 /*
17972 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17973 * output PHDR, so the next output section - .init.text - should
17974 @@ -190,12 +255,27 @@ SECTIONS
17975 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17976 #endif
17977
17978 - INIT_TEXT_SECTION(PAGE_SIZE)
17979 -#ifdef CONFIG_X86_64
17980 - :init
17981 -#endif
17982 + . = ALIGN(PAGE_SIZE);
17983 + init_begin = .;
17984 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17985 + VMLINUX_SYMBOL(_sinittext) = .;
17986 + INIT_TEXT
17987 + VMLINUX_SYMBOL(_einittext) = .;
17988 + . = ALIGN(PAGE_SIZE);
17989 + } :text.init
17990
17991 - INIT_DATA_SECTION(16)
17992 + /*
17993 + * .exit.text is discard at runtime, not link time, to deal with
17994 + * references from .altinstructions and .eh_frame
17995 + */
17996 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17997 + EXIT_TEXT
17998 + . = ALIGN(16);
17999 + } :text.exit
18000 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18001 +
18002 + . = ALIGN(PAGE_SIZE);
18003 + INIT_DATA_SECTION(16) :init
18004
18005 /*
18006 * Code and data for a variety of lowlevel trampolines, to be
18007 @@ -269,19 +349,12 @@ SECTIONS
18008 }
18009
18010 . = ALIGN(8);
18011 - /*
18012 - * .exit.text is discard at runtime, not link time, to deal with
18013 - * references from .altinstructions and .eh_frame
18014 - */
18015 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18016 - EXIT_TEXT
18017 - }
18018
18019 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18020 EXIT_DATA
18021 }
18022
18023 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18024 +#ifndef CONFIG_SMP
18025 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18026 #endif
18027
18028 @@ -300,16 +373,10 @@ SECTIONS
18029 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18030 __smp_locks = .;
18031 *(.smp_locks)
18032 - . = ALIGN(PAGE_SIZE);
18033 __smp_locks_end = .;
18034 + . = ALIGN(PAGE_SIZE);
18035 }
18036
18037 -#ifdef CONFIG_X86_64
18038 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18039 - NOSAVE_DATA
18040 - }
18041 -#endif
18042 -
18043 /* BSS */
18044 . = ALIGN(PAGE_SIZE);
18045 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18046 @@ -325,6 +392,7 @@ SECTIONS
18047 __brk_base = .;
18048 . += 64 * 1024; /* 64k alignment slop space */
18049 *(.brk_reservation) /* areas brk users have reserved */
18050 + . = ALIGN(HPAGE_SIZE);
18051 __brk_limit = .;
18052 }
18053
18054 @@ -351,13 +419,12 @@ SECTIONS
18055 * for the boot processor.
18056 */
18057 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18058 -INIT_PER_CPU(gdt_page);
18059 INIT_PER_CPU(irq_stack_union);
18060
18061 /*
18062 * Build-time check on the image size:
18063 */
18064 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18065 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18066 "kernel image bigger than KERNEL_IMAGE_SIZE");
18067
18068 #ifdef CONFIG_SMP
18069 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18070 index e4d4a22..47ee71f 100644
18071 --- a/arch/x86/kernel/vsyscall_64.c
18072 +++ b/arch/x86/kernel/vsyscall_64.c
18073 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18074 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18075 };
18076
18077 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18078 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18079
18080 static int __init vsyscall_setup(char *str)
18081 {
18082 if (str) {
18083 if (!strcmp("emulate", str))
18084 vsyscall_mode = EMULATE;
18085 - else if (!strcmp("native", str))
18086 - vsyscall_mode = NATIVE;
18087 else if (!strcmp("none", str))
18088 vsyscall_mode = NONE;
18089 else
18090 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18091
18092 tsk = current;
18093 if (seccomp_mode(&tsk->seccomp))
18094 - do_exit(SIGKILL);
18095 + do_group_exit(SIGKILL);
18096
18097 switch (vsyscall_nr) {
18098 case 0:
18099 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18100 return true;
18101
18102 sigsegv:
18103 - force_sig(SIGSEGV, current);
18104 - return true;
18105 + do_group_exit(SIGKILL);
18106 }
18107
18108 /*
18109 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18110 extern char __vvar_page;
18111 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18112
18113 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18114 - vsyscall_mode == NATIVE
18115 - ? PAGE_KERNEL_VSYSCALL
18116 - : PAGE_KERNEL_VVAR);
18117 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18118 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18119 (unsigned long)VSYSCALL_START);
18120
18121 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18122 index 9796c2f..f686fbf 100644
18123 --- a/arch/x86/kernel/x8664_ksyms_64.c
18124 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18125 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18126 EXPORT_SYMBOL(copy_user_generic_string);
18127 EXPORT_SYMBOL(copy_user_generic_unrolled);
18128 EXPORT_SYMBOL(__copy_user_nocache);
18129 -EXPORT_SYMBOL(_copy_from_user);
18130 -EXPORT_SYMBOL(_copy_to_user);
18131
18132 EXPORT_SYMBOL(copy_page);
18133 EXPORT_SYMBOL(clear_page);
18134 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18135 index a391134..d0b63b6e 100644
18136 --- a/arch/x86/kernel/xsave.c
18137 +++ b/arch/x86/kernel/xsave.c
18138 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18139 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18140 return -EINVAL;
18141
18142 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18143 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18144 fx_sw_user->extended_size -
18145 FP_XSTATE_MAGIC2_SIZE));
18146 if (err)
18147 @@ -267,7 +267,7 @@ fx_only:
18148 * the other extended state.
18149 */
18150 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18151 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18152 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18153 }
18154
18155 /*
18156 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18157 if (use_xsave())
18158 err = restore_user_xstate(buf);
18159 else
18160 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18161 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18162 buf);
18163 if (unlikely(err)) {
18164 /*
18165 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18166 index f1e3be1..588efc8 100644
18167 --- a/arch/x86/kvm/emulate.c
18168 +++ b/arch/x86/kvm/emulate.c
18169 @@ -249,6 +249,7 @@ struct gprefix {
18170
18171 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18172 do { \
18173 + unsigned long _tmp; \
18174 __asm__ __volatile__ ( \
18175 _PRE_EFLAGS("0", "4", "2") \
18176 _op _suffix " %"_x"3,%1; " \
18177 @@ -263,8 +264,6 @@ struct gprefix {
18178 /* Raw emulation: instruction has two explicit operands. */
18179 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18180 do { \
18181 - unsigned long _tmp; \
18182 - \
18183 switch ((ctxt)->dst.bytes) { \
18184 case 2: \
18185 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18186 @@ -280,7 +279,6 @@ struct gprefix {
18187
18188 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18189 do { \
18190 - unsigned long _tmp; \
18191 switch ((ctxt)->dst.bytes) { \
18192 case 1: \
18193 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18194 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18195 index 54abb40..a192606 100644
18196 --- a/arch/x86/kvm/lapic.c
18197 +++ b/arch/x86/kvm/lapic.c
18198 @@ -53,7 +53,7 @@
18199 #define APIC_BUS_CYCLE_NS 1
18200
18201 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18202 -#define apic_debug(fmt, arg...)
18203 +#define apic_debug(fmt, arg...) do {} while (0)
18204
18205 #define APIC_LVT_NUM 6
18206 /* 14 is the version for Xeon and Pentium 8.4.8*/
18207 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18208 index f1b36cf..af8a124 100644
18209 --- a/arch/x86/kvm/mmu.c
18210 +++ b/arch/x86/kvm/mmu.c
18211 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18212
18213 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18214
18215 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18216 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18217
18218 /*
18219 * Assume that the pte write on a page table of the same type
18220 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18221 }
18222
18223 spin_lock(&vcpu->kvm->mmu_lock);
18224 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18225 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18226 gentry = 0;
18227 kvm_mmu_free_some_pages(vcpu);
18228 ++vcpu->kvm->stat.mmu_pte_write;
18229 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18230 index 9299410..ade2f9b 100644
18231 --- a/arch/x86/kvm/paging_tmpl.h
18232 +++ b/arch/x86/kvm/paging_tmpl.h
18233 @@ -197,7 +197,7 @@ retry_walk:
18234 if (unlikely(kvm_is_error_hva(host_addr)))
18235 goto error;
18236
18237 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18238 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18239 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18240 goto error;
18241
18242 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18243 if (need_flush)
18244 kvm_flush_remote_tlbs(vcpu->kvm);
18245
18246 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18247 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18248
18249 spin_unlock(&vcpu->kvm->mmu_lock);
18250
18251 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18252 index e32243e..a6e6172 100644
18253 --- a/arch/x86/kvm/svm.c
18254 +++ b/arch/x86/kvm/svm.c
18255 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18256 int cpu = raw_smp_processor_id();
18257
18258 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18259 +
18260 + pax_open_kernel();
18261 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18262 + pax_close_kernel();
18263 +
18264 load_TR_desc();
18265 }
18266
18267 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18268 #endif
18269 #endif
18270
18271 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18272 + __set_fs(current_thread_info()->addr_limit);
18273 +#endif
18274 +
18275 reload_tss(vcpu);
18276
18277 local_irq_disable();
18278 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18279 index 579a0b5..ed7bbf9 100644
18280 --- a/arch/x86/kvm/vmx.c
18281 +++ b/arch/x86/kvm/vmx.c
18282 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18283 struct desc_struct *descs;
18284
18285 descs = (void *)gdt->address;
18286 +
18287 + pax_open_kernel();
18288 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18289 + pax_close_kernel();
18290 +
18291 load_TR_desc();
18292 }
18293
18294 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18295 if (!cpu_has_vmx_flexpriority())
18296 flexpriority_enabled = 0;
18297
18298 - if (!cpu_has_vmx_tpr_shadow())
18299 - kvm_x86_ops->update_cr8_intercept = NULL;
18300 + if (!cpu_has_vmx_tpr_shadow()) {
18301 + pax_open_kernel();
18302 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18303 + pax_close_kernel();
18304 + }
18305
18306 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18307 kvm_disable_largepages();
18308 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18309 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18310
18311 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18312 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18313 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18314
18315 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18316 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18317 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18318 "jmp .Lkvm_vmx_return \n\t"
18319 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18320 ".Lkvm_vmx_return: "
18321 +
18322 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18323 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18324 + ".Lkvm_vmx_return2: "
18325 +#endif
18326 +
18327 /* Save guest registers, load host registers, keep flags */
18328 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18329 "pop %0 \n\t"
18330 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18331 #endif
18332 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18333 [wordsize]"i"(sizeof(ulong))
18334 +
18335 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18336 + ,[cs]"i"(__KERNEL_CS)
18337 +#endif
18338 +
18339 : "cc", "memory"
18340 , R"ax", R"bx", R"di", R"si"
18341 #ifdef CONFIG_X86_64
18342 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18343 }
18344 }
18345
18346 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18347 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18348 +
18349 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18350 + loadsegment(fs, __KERNEL_PERCPU);
18351 +#endif
18352 +
18353 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18354 + __set_fs(current_thread_info()->addr_limit);
18355 +#endif
18356 +
18357 vmx->loaded_vmcs->launched = 1;
18358
18359 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18360 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18361 index 4c938da..4ddef65 100644
18362 --- a/arch/x86/kvm/x86.c
18363 +++ b/arch/x86/kvm/x86.c
18364 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18365 {
18366 struct kvm *kvm = vcpu->kvm;
18367 int lm = is_long_mode(vcpu);
18368 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18369 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18370 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18371 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18372 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18373 : kvm->arch.xen_hvm_config.blob_size_32;
18374 u32 page_num = data & ~PAGE_MASK;
18375 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18376 if (n < msr_list.nmsrs)
18377 goto out;
18378 r = -EFAULT;
18379 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18380 + goto out;
18381 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18382 num_msrs_to_save * sizeof(u32)))
18383 goto out;
18384 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18385 struct kvm_cpuid2 *cpuid,
18386 struct kvm_cpuid_entry2 __user *entries)
18387 {
18388 - int r;
18389 + int r, i;
18390
18391 r = -E2BIG;
18392 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18393 goto out;
18394 r = -EFAULT;
18395 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18396 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18397 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18398 goto out;
18399 + for (i = 0; i < cpuid->nent; ++i) {
18400 + struct kvm_cpuid_entry2 cpuid_entry;
18401 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18402 + goto out;
18403 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18404 + }
18405 vcpu->arch.cpuid_nent = cpuid->nent;
18406 kvm_apic_set_version(vcpu);
18407 kvm_x86_ops->cpuid_update(vcpu);
18408 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18409 struct kvm_cpuid2 *cpuid,
18410 struct kvm_cpuid_entry2 __user *entries)
18411 {
18412 - int r;
18413 + int r, i;
18414
18415 r = -E2BIG;
18416 if (cpuid->nent < vcpu->arch.cpuid_nent)
18417 goto out;
18418 r = -EFAULT;
18419 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18420 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18421 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18422 goto out;
18423 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18424 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18425 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18426 + goto out;
18427 + }
18428 return 0;
18429
18430 out:
18431 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18432 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18433 struct kvm_interrupt *irq)
18434 {
18435 - if (irq->irq < 0 || irq->irq >= 256)
18436 + if (irq->irq >= 256)
18437 return -EINVAL;
18438 if (irqchip_in_kernel(vcpu->kvm))
18439 return -ENXIO;
18440 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18441 kvm_mmu_set_mmio_spte_mask(mask);
18442 }
18443
18444 -int kvm_arch_init(void *opaque)
18445 +int kvm_arch_init(const void *opaque)
18446 {
18447 int r;
18448 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18449 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18450 index cf4603b..7cdde38 100644
18451 --- a/arch/x86/lguest/boot.c
18452 +++ b/arch/x86/lguest/boot.c
18453 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18454 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18455 * Launcher to reboot us.
18456 */
18457 -static void lguest_restart(char *reason)
18458 +static __noreturn void lguest_restart(char *reason)
18459 {
18460 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18461 + BUG();
18462 }
18463
18464 /*G:050
18465 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18466 index 042f682..c92afb6 100644
18467 --- a/arch/x86/lib/atomic64_32.c
18468 +++ b/arch/x86/lib/atomic64_32.c
18469 @@ -8,18 +8,30 @@
18470
18471 long long atomic64_read_cx8(long long, const atomic64_t *v);
18472 EXPORT_SYMBOL(atomic64_read_cx8);
18473 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18474 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18475 long long atomic64_set_cx8(long long, const atomic64_t *v);
18476 EXPORT_SYMBOL(atomic64_set_cx8);
18477 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18478 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18479 long long atomic64_xchg_cx8(long long, unsigned high);
18480 EXPORT_SYMBOL(atomic64_xchg_cx8);
18481 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18482 EXPORT_SYMBOL(atomic64_add_return_cx8);
18483 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18484 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18485 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18486 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18487 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18488 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18489 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18490 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18491 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18492 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18493 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18494 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18495 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18496 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18497 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18498 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18499 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18500 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18501 #ifndef CONFIG_X86_CMPXCHG64
18502 long long atomic64_read_386(long long, const atomic64_t *v);
18503 EXPORT_SYMBOL(atomic64_read_386);
18504 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18505 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18506 long long atomic64_set_386(long long, const atomic64_t *v);
18507 EXPORT_SYMBOL(atomic64_set_386);
18508 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18509 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18510 long long atomic64_xchg_386(long long, unsigned high);
18511 EXPORT_SYMBOL(atomic64_xchg_386);
18512 long long atomic64_add_return_386(long long a, atomic64_t *v);
18513 EXPORT_SYMBOL(atomic64_add_return_386);
18514 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18515 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18516 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18517 EXPORT_SYMBOL(atomic64_sub_return_386);
18518 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18519 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18520 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18521 EXPORT_SYMBOL(atomic64_inc_return_386);
18522 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18523 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18524 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18525 EXPORT_SYMBOL(atomic64_dec_return_386);
18526 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18527 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18528 long long atomic64_add_386(long long a, atomic64_t *v);
18529 EXPORT_SYMBOL(atomic64_add_386);
18530 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18531 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18532 long long atomic64_sub_386(long long a, atomic64_t *v);
18533 EXPORT_SYMBOL(atomic64_sub_386);
18534 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18535 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18536 long long atomic64_inc_386(long long a, atomic64_t *v);
18537 EXPORT_SYMBOL(atomic64_inc_386);
18538 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18539 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18540 long long atomic64_dec_386(long long a, atomic64_t *v);
18541 EXPORT_SYMBOL(atomic64_dec_386);
18542 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18543 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18544 long long atomic64_dec_if_positive_386(atomic64_t *v);
18545 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18546 int atomic64_inc_not_zero_386(atomic64_t *v);
18547 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18548 index e8e7e0d..56fd1b0 100644
18549 --- a/arch/x86/lib/atomic64_386_32.S
18550 +++ b/arch/x86/lib/atomic64_386_32.S
18551 @@ -48,6 +48,10 @@ BEGIN(read)
18552 movl (v), %eax
18553 movl 4(v), %edx
18554 RET_ENDP
18555 +BEGIN(read_unchecked)
18556 + movl (v), %eax
18557 + movl 4(v), %edx
18558 +RET_ENDP
18559 #undef v
18560
18561 #define v %esi
18562 @@ -55,6 +59,10 @@ BEGIN(set)
18563 movl %ebx, (v)
18564 movl %ecx, 4(v)
18565 RET_ENDP
18566 +BEGIN(set_unchecked)
18567 + movl %ebx, (v)
18568 + movl %ecx, 4(v)
18569 +RET_ENDP
18570 #undef v
18571
18572 #define v %esi
18573 @@ -70,6 +78,20 @@ RET_ENDP
18574 BEGIN(add)
18575 addl %eax, (v)
18576 adcl %edx, 4(v)
18577 +
18578 +#ifdef CONFIG_PAX_REFCOUNT
18579 + jno 0f
18580 + subl %eax, (v)
18581 + sbbl %edx, 4(v)
18582 + int $4
18583 +0:
18584 + _ASM_EXTABLE(0b, 0b)
18585 +#endif
18586 +
18587 +RET_ENDP
18588 +BEGIN(add_unchecked)
18589 + addl %eax, (v)
18590 + adcl %edx, 4(v)
18591 RET_ENDP
18592 #undef v
18593
18594 @@ -77,6 +99,24 @@ RET_ENDP
18595 BEGIN(add_return)
18596 addl (v), %eax
18597 adcl 4(v), %edx
18598 +
18599 +#ifdef CONFIG_PAX_REFCOUNT
18600 + into
18601 +1234:
18602 + _ASM_EXTABLE(1234b, 2f)
18603 +#endif
18604 +
18605 + movl %eax, (v)
18606 + movl %edx, 4(v)
18607 +
18608 +#ifdef CONFIG_PAX_REFCOUNT
18609 +2:
18610 +#endif
18611 +
18612 +RET_ENDP
18613 +BEGIN(add_return_unchecked)
18614 + addl (v), %eax
18615 + adcl 4(v), %edx
18616 movl %eax, (v)
18617 movl %edx, 4(v)
18618 RET_ENDP
18619 @@ -86,6 +126,20 @@ RET_ENDP
18620 BEGIN(sub)
18621 subl %eax, (v)
18622 sbbl %edx, 4(v)
18623 +
18624 +#ifdef CONFIG_PAX_REFCOUNT
18625 + jno 0f
18626 + addl %eax, (v)
18627 + adcl %edx, 4(v)
18628 + int $4
18629 +0:
18630 + _ASM_EXTABLE(0b, 0b)
18631 +#endif
18632 +
18633 +RET_ENDP
18634 +BEGIN(sub_unchecked)
18635 + subl %eax, (v)
18636 + sbbl %edx, 4(v)
18637 RET_ENDP
18638 #undef v
18639
18640 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18641 sbbl $0, %edx
18642 addl (v), %eax
18643 adcl 4(v), %edx
18644 +
18645 +#ifdef CONFIG_PAX_REFCOUNT
18646 + into
18647 +1234:
18648 + _ASM_EXTABLE(1234b, 2f)
18649 +#endif
18650 +
18651 + movl %eax, (v)
18652 + movl %edx, 4(v)
18653 +
18654 +#ifdef CONFIG_PAX_REFCOUNT
18655 +2:
18656 +#endif
18657 +
18658 +RET_ENDP
18659 +BEGIN(sub_return_unchecked)
18660 + negl %edx
18661 + negl %eax
18662 + sbbl $0, %edx
18663 + addl (v), %eax
18664 + adcl 4(v), %edx
18665 movl %eax, (v)
18666 movl %edx, 4(v)
18667 RET_ENDP
18668 @@ -105,6 +180,20 @@ RET_ENDP
18669 BEGIN(inc)
18670 addl $1, (v)
18671 adcl $0, 4(v)
18672 +
18673 +#ifdef CONFIG_PAX_REFCOUNT
18674 + jno 0f
18675 + subl $1, (v)
18676 + sbbl $0, 4(v)
18677 + int $4
18678 +0:
18679 + _ASM_EXTABLE(0b, 0b)
18680 +#endif
18681 +
18682 +RET_ENDP
18683 +BEGIN(inc_unchecked)
18684 + addl $1, (v)
18685 + adcl $0, 4(v)
18686 RET_ENDP
18687 #undef v
18688
18689 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18690 movl 4(v), %edx
18691 addl $1, %eax
18692 adcl $0, %edx
18693 +
18694 +#ifdef CONFIG_PAX_REFCOUNT
18695 + into
18696 +1234:
18697 + _ASM_EXTABLE(1234b, 2f)
18698 +#endif
18699 +
18700 + movl %eax, (v)
18701 + movl %edx, 4(v)
18702 +
18703 +#ifdef CONFIG_PAX_REFCOUNT
18704 +2:
18705 +#endif
18706 +
18707 +RET_ENDP
18708 +BEGIN(inc_return_unchecked)
18709 + movl (v), %eax
18710 + movl 4(v), %edx
18711 + addl $1, %eax
18712 + adcl $0, %edx
18713 movl %eax, (v)
18714 movl %edx, 4(v)
18715 RET_ENDP
18716 @@ -123,6 +232,20 @@ RET_ENDP
18717 BEGIN(dec)
18718 subl $1, (v)
18719 sbbl $0, 4(v)
18720 +
18721 +#ifdef CONFIG_PAX_REFCOUNT
18722 + jno 0f
18723 + addl $1, (v)
18724 + adcl $0, 4(v)
18725 + int $4
18726 +0:
18727 + _ASM_EXTABLE(0b, 0b)
18728 +#endif
18729 +
18730 +RET_ENDP
18731 +BEGIN(dec_unchecked)
18732 + subl $1, (v)
18733 + sbbl $0, 4(v)
18734 RET_ENDP
18735 #undef v
18736
18737 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18738 movl 4(v), %edx
18739 subl $1, %eax
18740 sbbl $0, %edx
18741 +
18742 +#ifdef CONFIG_PAX_REFCOUNT
18743 + into
18744 +1234:
18745 + _ASM_EXTABLE(1234b, 2f)
18746 +#endif
18747 +
18748 + movl %eax, (v)
18749 + movl %edx, 4(v)
18750 +
18751 +#ifdef CONFIG_PAX_REFCOUNT
18752 +2:
18753 +#endif
18754 +
18755 +RET_ENDP
18756 +BEGIN(dec_return_unchecked)
18757 + movl (v), %eax
18758 + movl 4(v), %edx
18759 + subl $1, %eax
18760 + sbbl $0, %edx
18761 movl %eax, (v)
18762 movl %edx, 4(v)
18763 RET_ENDP
18764 @@ -143,6 +286,13 @@ BEGIN(add_unless)
18765 adcl %edx, %edi
18766 addl (v), %eax
18767 adcl 4(v), %edx
18768 +
18769 +#ifdef CONFIG_PAX_REFCOUNT
18770 + into
18771 +1234:
18772 + _ASM_EXTABLE(1234b, 2f)
18773 +#endif
18774 +
18775 cmpl %eax, %esi
18776 je 3f
18777 1:
18778 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18779 1:
18780 addl $1, %eax
18781 adcl $0, %edx
18782 +
18783 +#ifdef CONFIG_PAX_REFCOUNT
18784 + into
18785 +1234:
18786 + _ASM_EXTABLE(1234b, 2f)
18787 +#endif
18788 +
18789 movl %eax, (v)
18790 movl %edx, 4(v)
18791 movl $1, %eax
18792 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18793 movl 4(v), %edx
18794 subl $1, %eax
18795 sbbl $0, %edx
18796 +
18797 +#ifdef CONFIG_PAX_REFCOUNT
18798 + into
18799 +1234:
18800 + _ASM_EXTABLE(1234b, 1f)
18801 +#endif
18802 +
18803 js 1f
18804 movl %eax, (v)
18805 movl %edx, 4(v)
18806 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18807 index 391a083..d658e9f 100644
18808 --- a/arch/x86/lib/atomic64_cx8_32.S
18809 +++ b/arch/x86/lib/atomic64_cx8_32.S
18810 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18811 CFI_STARTPROC
18812
18813 read64 %ecx
18814 + pax_force_retaddr
18815 ret
18816 CFI_ENDPROC
18817 ENDPROC(atomic64_read_cx8)
18818
18819 +ENTRY(atomic64_read_unchecked_cx8)
18820 + CFI_STARTPROC
18821 +
18822 + read64 %ecx
18823 + pax_force_retaddr
18824 + ret
18825 + CFI_ENDPROC
18826 +ENDPROC(atomic64_read_unchecked_cx8)
18827 +
18828 ENTRY(atomic64_set_cx8)
18829 CFI_STARTPROC
18830
18831 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18832 cmpxchg8b (%esi)
18833 jne 1b
18834
18835 + pax_force_retaddr
18836 ret
18837 CFI_ENDPROC
18838 ENDPROC(atomic64_set_cx8)
18839
18840 +ENTRY(atomic64_set_unchecked_cx8)
18841 + CFI_STARTPROC
18842 +
18843 +1:
18844 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
18845 + * are atomic on 586 and newer */
18846 + cmpxchg8b (%esi)
18847 + jne 1b
18848 +
18849 + pax_force_retaddr
18850 + ret
18851 + CFI_ENDPROC
18852 +ENDPROC(atomic64_set_unchecked_cx8)
18853 +
18854 ENTRY(atomic64_xchg_cx8)
18855 CFI_STARTPROC
18856
18857 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18858 cmpxchg8b (%esi)
18859 jne 1b
18860
18861 + pax_force_retaddr
18862 ret
18863 CFI_ENDPROC
18864 ENDPROC(atomic64_xchg_cx8)
18865
18866 -.macro addsub_return func ins insc
18867 -ENTRY(atomic64_\func\()_return_cx8)
18868 +.macro addsub_return func ins insc unchecked=""
18869 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18870 CFI_STARTPROC
18871 SAVE ebp
18872 SAVE ebx
18873 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18874 movl %edx, %ecx
18875 \ins\()l %esi, %ebx
18876 \insc\()l %edi, %ecx
18877 +
18878 +.ifb \unchecked
18879 +#ifdef CONFIG_PAX_REFCOUNT
18880 + into
18881 +2:
18882 + _ASM_EXTABLE(2b, 3f)
18883 +#endif
18884 +.endif
18885 +
18886 LOCK_PREFIX
18887 cmpxchg8b (%ebp)
18888 jne 1b
18889 -
18890 -10:
18891 movl %ebx, %eax
18892 movl %ecx, %edx
18893 +
18894 +.ifb \unchecked
18895 +#ifdef CONFIG_PAX_REFCOUNT
18896 +3:
18897 +#endif
18898 +.endif
18899 +
18900 RESTORE edi
18901 RESTORE esi
18902 RESTORE ebx
18903 RESTORE ebp
18904 + pax_force_retaddr
18905 ret
18906 CFI_ENDPROC
18907 -ENDPROC(atomic64_\func\()_return_cx8)
18908 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18909 .endm
18910
18911 addsub_return add add adc
18912 addsub_return sub sub sbb
18913 +addsub_return add add adc _unchecked
18914 +addsub_return sub sub sbb _unchecked
18915
18916 -.macro incdec_return func ins insc
18917 -ENTRY(atomic64_\func\()_return_cx8)
18918 +.macro incdec_return func ins insc unchecked
18919 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18920 CFI_STARTPROC
18921 SAVE ebx
18922
18923 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18924 movl %edx, %ecx
18925 \ins\()l $1, %ebx
18926 \insc\()l $0, %ecx
18927 +
18928 +.ifb \unchecked
18929 +#ifdef CONFIG_PAX_REFCOUNT
18930 + into
18931 +2:
18932 + _ASM_EXTABLE(2b, 3f)
18933 +#endif
18934 +.endif
18935 +
18936 LOCK_PREFIX
18937 cmpxchg8b (%esi)
18938 jne 1b
18939
18940 -10:
18941 movl %ebx, %eax
18942 movl %ecx, %edx
18943 +
18944 +.ifb \unchecked
18945 +#ifdef CONFIG_PAX_REFCOUNT
18946 +3:
18947 +#endif
18948 +.endif
18949 +
18950 RESTORE ebx
18951 + pax_force_retaddr
18952 ret
18953 CFI_ENDPROC
18954 -ENDPROC(atomic64_\func\()_return_cx8)
18955 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18956 .endm
18957
18958 incdec_return inc add adc
18959 incdec_return dec sub sbb
18960 +incdec_return inc add adc _unchecked
18961 +incdec_return dec sub sbb _unchecked
18962
18963 ENTRY(atomic64_dec_if_positive_cx8)
18964 CFI_STARTPROC
18965 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18966 movl %edx, %ecx
18967 subl $1, %ebx
18968 sbb $0, %ecx
18969 +
18970 +#ifdef CONFIG_PAX_REFCOUNT
18971 + into
18972 +1234:
18973 + _ASM_EXTABLE(1234b, 2f)
18974 +#endif
18975 +
18976 js 2f
18977 LOCK_PREFIX
18978 cmpxchg8b (%esi)
18979 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18980 movl %ebx, %eax
18981 movl %ecx, %edx
18982 RESTORE ebx
18983 + pax_force_retaddr
18984 ret
18985 CFI_ENDPROC
18986 ENDPROC(atomic64_dec_if_positive_cx8)
18987 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18988 movl %edx, %ecx
18989 addl %esi, %ebx
18990 adcl %edi, %ecx
18991 +
18992 +#ifdef CONFIG_PAX_REFCOUNT
18993 + into
18994 +1234:
18995 + _ASM_EXTABLE(1234b, 3f)
18996 +#endif
18997 +
18998 LOCK_PREFIX
18999 cmpxchg8b (%ebp)
19000 jne 1b
19001 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19002 CFI_ADJUST_CFA_OFFSET -8
19003 RESTORE ebx
19004 RESTORE ebp
19005 + pax_force_retaddr
19006 ret
19007 4:
19008 cmpl %edx, 4(%esp)
19009 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19010 movl %edx, %ecx
19011 addl $1, %ebx
19012 adcl $0, %ecx
19013 +
19014 +#ifdef CONFIG_PAX_REFCOUNT
19015 + into
19016 +1234:
19017 + _ASM_EXTABLE(1234b, 3f)
19018 +#endif
19019 +
19020 LOCK_PREFIX
19021 cmpxchg8b (%esi)
19022 jne 1b
19023 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19024 movl $1, %eax
19025 3:
19026 RESTORE ebx
19027 + pax_force_retaddr
19028 ret
19029 4:
19030 testl %edx, %edx
19031 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19032 index 78d16a5..fbcf666 100644
19033 --- a/arch/x86/lib/checksum_32.S
19034 +++ b/arch/x86/lib/checksum_32.S
19035 @@ -28,7 +28,8 @@
19036 #include <linux/linkage.h>
19037 #include <asm/dwarf2.h>
19038 #include <asm/errno.h>
19039 -
19040 +#include <asm/segment.h>
19041 +
19042 /*
19043 * computes a partial checksum, e.g. for TCP/UDP fragments
19044 */
19045 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19046
19047 #define ARGBASE 16
19048 #define FP 12
19049 -
19050 -ENTRY(csum_partial_copy_generic)
19051 +
19052 +ENTRY(csum_partial_copy_generic_to_user)
19053 CFI_STARTPROC
19054 +
19055 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19056 + pushl_cfi %gs
19057 + popl_cfi %es
19058 + jmp csum_partial_copy_generic
19059 +#endif
19060 +
19061 +ENTRY(csum_partial_copy_generic_from_user)
19062 +
19063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19064 + pushl_cfi %gs
19065 + popl_cfi %ds
19066 +#endif
19067 +
19068 +ENTRY(csum_partial_copy_generic)
19069 subl $4,%esp
19070 CFI_ADJUST_CFA_OFFSET 4
19071 pushl_cfi %edi
19072 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19073 jmp 4f
19074 SRC(1: movw (%esi), %bx )
19075 addl $2, %esi
19076 -DST( movw %bx, (%edi) )
19077 +DST( movw %bx, %es:(%edi) )
19078 addl $2, %edi
19079 addw %bx, %ax
19080 adcl $0, %eax
19081 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19082 SRC(1: movl (%esi), %ebx )
19083 SRC( movl 4(%esi), %edx )
19084 adcl %ebx, %eax
19085 -DST( movl %ebx, (%edi) )
19086 +DST( movl %ebx, %es:(%edi) )
19087 adcl %edx, %eax
19088 -DST( movl %edx, 4(%edi) )
19089 +DST( movl %edx, %es:4(%edi) )
19090
19091 SRC( movl 8(%esi), %ebx )
19092 SRC( movl 12(%esi), %edx )
19093 adcl %ebx, %eax
19094 -DST( movl %ebx, 8(%edi) )
19095 +DST( movl %ebx, %es:8(%edi) )
19096 adcl %edx, %eax
19097 -DST( movl %edx, 12(%edi) )
19098 +DST( movl %edx, %es:12(%edi) )
19099
19100 SRC( movl 16(%esi), %ebx )
19101 SRC( movl 20(%esi), %edx )
19102 adcl %ebx, %eax
19103 -DST( movl %ebx, 16(%edi) )
19104 +DST( movl %ebx, %es:16(%edi) )
19105 adcl %edx, %eax
19106 -DST( movl %edx, 20(%edi) )
19107 +DST( movl %edx, %es:20(%edi) )
19108
19109 SRC( movl 24(%esi), %ebx )
19110 SRC( movl 28(%esi), %edx )
19111 adcl %ebx, %eax
19112 -DST( movl %ebx, 24(%edi) )
19113 +DST( movl %ebx, %es:24(%edi) )
19114 adcl %edx, %eax
19115 -DST( movl %edx, 28(%edi) )
19116 +DST( movl %edx, %es:28(%edi) )
19117
19118 lea 32(%esi), %esi
19119 lea 32(%edi), %edi
19120 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19121 shrl $2, %edx # This clears CF
19122 SRC(3: movl (%esi), %ebx )
19123 adcl %ebx, %eax
19124 -DST( movl %ebx, (%edi) )
19125 +DST( movl %ebx, %es:(%edi) )
19126 lea 4(%esi), %esi
19127 lea 4(%edi), %edi
19128 dec %edx
19129 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19130 jb 5f
19131 SRC( movw (%esi), %cx )
19132 leal 2(%esi), %esi
19133 -DST( movw %cx, (%edi) )
19134 +DST( movw %cx, %es:(%edi) )
19135 leal 2(%edi), %edi
19136 je 6f
19137 shll $16,%ecx
19138 SRC(5: movb (%esi), %cl )
19139 -DST( movb %cl, (%edi) )
19140 +DST( movb %cl, %es:(%edi) )
19141 6: addl %ecx, %eax
19142 adcl $0, %eax
19143 7:
19144 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19145
19146 6001:
19147 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19148 - movl $-EFAULT, (%ebx)
19149 + movl $-EFAULT, %ss:(%ebx)
19150
19151 # zero the complete destination - computing the rest
19152 # is too much work
19153 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19154
19155 6002:
19156 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19157 - movl $-EFAULT,(%ebx)
19158 + movl $-EFAULT,%ss:(%ebx)
19159 jmp 5000b
19160
19161 .previous
19162
19163 + pushl_cfi %ss
19164 + popl_cfi %ds
19165 + pushl_cfi %ss
19166 + popl_cfi %es
19167 popl_cfi %ebx
19168 CFI_RESTORE ebx
19169 popl_cfi %esi
19170 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19171 popl_cfi %ecx # equivalent to addl $4,%esp
19172 ret
19173 CFI_ENDPROC
19174 -ENDPROC(csum_partial_copy_generic)
19175 +ENDPROC(csum_partial_copy_generic_to_user)
19176
19177 #else
19178
19179 /* Version for PentiumII/PPro */
19180
19181 #define ROUND1(x) \
19182 + nop; nop; nop; \
19183 SRC(movl x(%esi), %ebx ) ; \
19184 addl %ebx, %eax ; \
19185 - DST(movl %ebx, x(%edi) ) ;
19186 + DST(movl %ebx, %es:x(%edi)) ;
19187
19188 #define ROUND(x) \
19189 + nop; nop; nop; \
19190 SRC(movl x(%esi), %ebx ) ; \
19191 adcl %ebx, %eax ; \
19192 - DST(movl %ebx, x(%edi) ) ;
19193 + DST(movl %ebx, %es:x(%edi)) ;
19194
19195 #define ARGBASE 12
19196 -
19197 -ENTRY(csum_partial_copy_generic)
19198 +
19199 +ENTRY(csum_partial_copy_generic_to_user)
19200 CFI_STARTPROC
19201 +
19202 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19203 + pushl_cfi %gs
19204 + popl_cfi %es
19205 + jmp csum_partial_copy_generic
19206 +#endif
19207 +
19208 +ENTRY(csum_partial_copy_generic_from_user)
19209 +
19210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19211 + pushl_cfi %gs
19212 + popl_cfi %ds
19213 +#endif
19214 +
19215 +ENTRY(csum_partial_copy_generic)
19216 pushl_cfi %ebx
19217 CFI_REL_OFFSET ebx, 0
19218 pushl_cfi %edi
19219 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19220 subl %ebx, %edi
19221 lea -1(%esi),%edx
19222 andl $-32,%edx
19223 - lea 3f(%ebx,%ebx), %ebx
19224 + lea 3f(%ebx,%ebx,2), %ebx
19225 testl %esi, %esi
19226 jmp *%ebx
19227 1: addl $64,%esi
19228 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19229 jb 5f
19230 SRC( movw (%esi), %dx )
19231 leal 2(%esi), %esi
19232 -DST( movw %dx, (%edi) )
19233 +DST( movw %dx, %es:(%edi) )
19234 leal 2(%edi), %edi
19235 je 6f
19236 shll $16,%edx
19237 5:
19238 SRC( movb (%esi), %dl )
19239 -DST( movb %dl, (%edi) )
19240 +DST( movb %dl, %es:(%edi) )
19241 6: addl %edx, %eax
19242 adcl $0, %eax
19243 7:
19244 .section .fixup, "ax"
19245 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19246 - movl $-EFAULT, (%ebx)
19247 + movl $-EFAULT, %ss:(%ebx)
19248 # zero the complete destination (computing the rest is too much work)
19249 movl ARGBASE+8(%esp),%edi # dst
19250 movl ARGBASE+12(%esp),%ecx # len
19251 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19252 rep; stosb
19253 jmp 7b
19254 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19255 - movl $-EFAULT, (%ebx)
19256 + movl $-EFAULT, %ss:(%ebx)
19257 jmp 7b
19258 .previous
19259
19260 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19261 + pushl_cfi %ss
19262 + popl_cfi %ds
19263 + pushl_cfi %ss
19264 + popl_cfi %es
19265 +#endif
19266 +
19267 popl_cfi %esi
19268 CFI_RESTORE esi
19269 popl_cfi %edi
19270 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19271 CFI_RESTORE ebx
19272 ret
19273 CFI_ENDPROC
19274 -ENDPROC(csum_partial_copy_generic)
19275 +ENDPROC(csum_partial_copy_generic_to_user)
19276
19277 #undef ROUND
19278 #undef ROUND1
19279 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19280 index f2145cf..cea889d 100644
19281 --- a/arch/x86/lib/clear_page_64.S
19282 +++ b/arch/x86/lib/clear_page_64.S
19283 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19284 movl $4096/8,%ecx
19285 xorl %eax,%eax
19286 rep stosq
19287 + pax_force_retaddr
19288 ret
19289 CFI_ENDPROC
19290 ENDPROC(clear_page_c)
19291 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19292 movl $4096,%ecx
19293 xorl %eax,%eax
19294 rep stosb
19295 + pax_force_retaddr
19296 ret
19297 CFI_ENDPROC
19298 ENDPROC(clear_page_c_e)
19299 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19300 leaq 64(%rdi),%rdi
19301 jnz .Lloop
19302 nop
19303 + pax_force_retaddr
19304 ret
19305 CFI_ENDPROC
19306 .Lclear_page_end:
19307 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19308
19309 #include <asm/cpufeature.h>
19310
19311 - .section .altinstr_replacement,"ax"
19312 + .section .altinstr_replacement,"a"
19313 1: .byte 0xeb /* jmp <disp8> */
19314 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19315 2: .byte 0xeb /* jmp <disp8> */
19316 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19317 index 1e572c5..2a162cd 100644
19318 --- a/arch/x86/lib/cmpxchg16b_emu.S
19319 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19320 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19321
19322 popf
19323 mov $1, %al
19324 + pax_force_retaddr
19325 ret
19326
19327 not_same:
19328 popf
19329 xor %al,%al
19330 + pax_force_retaddr
19331 ret
19332
19333 CFI_ENDPROC
19334 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19335 index 01c805b..dccb07f 100644
19336 --- a/arch/x86/lib/copy_page_64.S
19337 +++ b/arch/x86/lib/copy_page_64.S
19338 @@ -9,6 +9,7 @@ copy_page_c:
19339 CFI_STARTPROC
19340 movl $4096/8,%ecx
19341 rep movsq
19342 + pax_force_retaddr
19343 ret
19344 CFI_ENDPROC
19345 ENDPROC(copy_page_c)
19346 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19347 movq 16 (%rsi), %rdx
19348 movq 24 (%rsi), %r8
19349 movq 32 (%rsi), %r9
19350 - movq 40 (%rsi), %r10
19351 + movq 40 (%rsi), %r13
19352 movq 48 (%rsi), %r11
19353 movq 56 (%rsi), %r12
19354
19355 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19356 movq %rdx, 16 (%rdi)
19357 movq %r8, 24 (%rdi)
19358 movq %r9, 32 (%rdi)
19359 - movq %r10, 40 (%rdi)
19360 + movq %r13, 40 (%rdi)
19361 movq %r11, 48 (%rdi)
19362 movq %r12, 56 (%rdi)
19363
19364 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19365 movq 16 (%rsi), %rdx
19366 movq 24 (%rsi), %r8
19367 movq 32 (%rsi), %r9
19368 - movq 40 (%rsi), %r10
19369 + movq 40 (%rsi), %r13
19370 movq 48 (%rsi), %r11
19371 movq 56 (%rsi), %r12
19372
19373 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19374 movq %rdx, 16 (%rdi)
19375 movq %r8, 24 (%rdi)
19376 movq %r9, 32 (%rdi)
19377 - movq %r10, 40 (%rdi)
19378 + movq %r13, 40 (%rdi)
19379 movq %r11, 48 (%rdi)
19380 movq %r12, 56 (%rdi)
19381
19382 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19383 CFI_RESTORE r13
19384 addq $3*8,%rsp
19385 CFI_ADJUST_CFA_OFFSET -3*8
19386 + pax_force_retaddr
19387 ret
19388 .Lcopy_page_end:
19389 CFI_ENDPROC
19390 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19391
19392 #include <asm/cpufeature.h>
19393
19394 - .section .altinstr_replacement,"ax"
19395 + .section .altinstr_replacement,"a"
19396 1: .byte 0xeb /* jmp <disp8> */
19397 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19398 2:
19399 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19400 index 0248402..821c786 100644
19401 --- a/arch/x86/lib/copy_user_64.S
19402 +++ b/arch/x86/lib/copy_user_64.S
19403 @@ -16,6 +16,7 @@
19404 #include <asm/thread_info.h>
19405 #include <asm/cpufeature.h>
19406 #include <asm/alternative-asm.h>
19407 +#include <asm/pgtable.h>
19408
19409 /*
19410 * By placing feature2 after feature1 in altinstructions section, we logically
19411 @@ -29,7 +30,7 @@
19412 .byte 0xe9 /* 32bit jump */
19413 .long \orig-1f /* by default jump to orig */
19414 1:
19415 - .section .altinstr_replacement,"ax"
19416 + .section .altinstr_replacement,"a"
19417 2: .byte 0xe9 /* near jump with 32bit immediate */
19418 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19419 3: .byte 0xe9 /* near jump with 32bit immediate */
19420 @@ -71,47 +72,20 @@
19421 #endif
19422 .endm
19423
19424 -/* Standard copy_to_user with segment limit checking */
19425 -ENTRY(_copy_to_user)
19426 - CFI_STARTPROC
19427 - GET_THREAD_INFO(%rax)
19428 - movq %rdi,%rcx
19429 - addq %rdx,%rcx
19430 - jc bad_to_user
19431 - cmpq TI_addr_limit(%rax),%rcx
19432 - ja bad_to_user
19433 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19434 - copy_user_generic_unrolled,copy_user_generic_string, \
19435 - copy_user_enhanced_fast_string
19436 - CFI_ENDPROC
19437 -ENDPROC(_copy_to_user)
19438 -
19439 -/* Standard copy_from_user with segment limit checking */
19440 -ENTRY(_copy_from_user)
19441 - CFI_STARTPROC
19442 - GET_THREAD_INFO(%rax)
19443 - movq %rsi,%rcx
19444 - addq %rdx,%rcx
19445 - jc bad_from_user
19446 - cmpq TI_addr_limit(%rax),%rcx
19447 - ja bad_from_user
19448 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19449 - copy_user_generic_unrolled,copy_user_generic_string, \
19450 - copy_user_enhanced_fast_string
19451 - CFI_ENDPROC
19452 -ENDPROC(_copy_from_user)
19453 -
19454 .section .fixup,"ax"
19455 /* must zero dest */
19456 ENTRY(bad_from_user)
19457 bad_from_user:
19458 CFI_STARTPROC
19459 + testl %edx,%edx
19460 + js bad_to_user
19461 movl %edx,%ecx
19462 xorl %eax,%eax
19463 rep
19464 stosb
19465 bad_to_user:
19466 movl %edx,%eax
19467 + pax_force_retaddr
19468 ret
19469 CFI_ENDPROC
19470 ENDPROC(bad_from_user)
19471 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19472 jz 17f
19473 1: movq (%rsi),%r8
19474 2: movq 1*8(%rsi),%r9
19475 -3: movq 2*8(%rsi),%r10
19476 +3: movq 2*8(%rsi),%rax
19477 4: movq 3*8(%rsi),%r11
19478 5: movq %r8,(%rdi)
19479 6: movq %r9,1*8(%rdi)
19480 -7: movq %r10,2*8(%rdi)
19481 +7: movq %rax,2*8(%rdi)
19482 8: movq %r11,3*8(%rdi)
19483 9: movq 4*8(%rsi),%r8
19484 10: movq 5*8(%rsi),%r9
19485 -11: movq 6*8(%rsi),%r10
19486 +11: movq 6*8(%rsi),%rax
19487 12: movq 7*8(%rsi),%r11
19488 13: movq %r8,4*8(%rdi)
19489 14: movq %r9,5*8(%rdi)
19490 -15: movq %r10,6*8(%rdi)
19491 +15: movq %rax,6*8(%rdi)
19492 16: movq %r11,7*8(%rdi)
19493 leaq 64(%rsi),%rsi
19494 leaq 64(%rdi),%rdi
19495 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19496 decl %ecx
19497 jnz 21b
19498 23: xor %eax,%eax
19499 + pax_force_retaddr
19500 ret
19501
19502 .section .fixup,"ax"
19503 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19504 3: rep
19505 movsb
19506 4: xorl %eax,%eax
19507 + pax_force_retaddr
19508 ret
19509
19510 .section .fixup,"ax"
19511 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19512 1: rep
19513 movsb
19514 2: xorl %eax,%eax
19515 + pax_force_retaddr
19516 ret
19517
19518 .section .fixup,"ax"
19519 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19520 index cb0c112..e3a6895 100644
19521 --- a/arch/x86/lib/copy_user_nocache_64.S
19522 +++ b/arch/x86/lib/copy_user_nocache_64.S
19523 @@ -8,12 +8,14 @@
19524
19525 #include <linux/linkage.h>
19526 #include <asm/dwarf2.h>
19527 +#include <asm/alternative-asm.h>
19528
19529 #define FIX_ALIGNMENT 1
19530
19531 #include <asm/current.h>
19532 #include <asm/asm-offsets.h>
19533 #include <asm/thread_info.h>
19534 +#include <asm/pgtable.h>
19535
19536 .macro ALIGN_DESTINATION
19537 #ifdef FIX_ALIGNMENT
19538 @@ -50,6 +52,15 @@
19539 */
19540 ENTRY(__copy_user_nocache)
19541 CFI_STARTPROC
19542 +
19543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19544 + mov $PAX_USER_SHADOW_BASE,%rcx
19545 + cmp %rcx,%rsi
19546 + jae 1f
19547 + add %rcx,%rsi
19548 +1:
19549 +#endif
19550 +
19551 cmpl $8,%edx
19552 jb 20f /* less then 8 bytes, go to byte copy loop */
19553 ALIGN_DESTINATION
19554 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19555 jz 17f
19556 1: movq (%rsi),%r8
19557 2: movq 1*8(%rsi),%r9
19558 -3: movq 2*8(%rsi),%r10
19559 +3: movq 2*8(%rsi),%rax
19560 4: movq 3*8(%rsi),%r11
19561 5: movnti %r8,(%rdi)
19562 6: movnti %r9,1*8(%rdi)
19563 -7: movnti %r10,2*8(%rdi)
19564 +7: movnti %rax,2*8(%rdi)
19565 8: movnti %r11,3*8(%rdi)
19566 9: movq 4*8(%rsi),%r8
19567 10: movq 5*8(%rsi),%r9
19568 -11: movq 6*8(%rsi),%r10
19569 +11: movq 6*8(%rsi),%rax
19570 12: movq 7*8(%rsi),%r11
19571 13: movnti %r8,4*8(%rdi)
19572 14: movnti %r9,5*8(%rdi)
19573 -15: movnti %r10,6*8(%rdi)
19574 +15: movnti %rax,6*8(%rdi)
19575 16: movnti %r11,7*8(%rdi)
19576 leaq 64(%rsi),%rsi
19577 leaq 64(%rdi),%rdi
19578 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19579 jnz 21b
19580 23: xorl %eax,%eax
19581 sfence
19582 + pax_force_retaddr
19583 ret
19584
19585 .section .fixup,"ax"
19586 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19587 index fb903b7..c92b7f7 100644
19588 --- a/arch/x86/lib/csum-copy_64.S
19589 +++ b/arch/x86/lib/csum-copy_64.S
19590 @@ -8,6 +8,7 @@
19591 #include <linux/linkage.h>
19592 #include <asm/dwarf2.h>
19593 #include <asm/errno.h>
19594 +#include <asm/alternative-asm.h>
19595
19596 /*
19597 * Checksum copy with exception handling.
19598 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19599 CFI_RESTORE rbp
19600 addq $7*8, %rsp
19601 CFI_ADJUST_CFA_OFFSET -7*8
19602 + pax_force_retaddr 0, 1
19603 ret
19604 CFI_RESTORE_STATE
19605
19606 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19607 index 459b58a..9570bc7 100644
19608 --- a/arch/x86/lib/csum-wrappers_64.c
19609 +++ b/arch/x86/lib/csum-wrappers_64.c
19610 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19611 len -= 2;
19612 }
19613 }
19614 - isum = csum_partial_copy_generic((__force const void *)src,
19615 +
19616 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19617 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19618 + src += PAX_USER_SHADOW_BASE;
19619 +#endif
19620 +
19621 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19622 dst, len, isum, errp, NULL);
19623 if (unlikely(*errp))
19624 goto out_err;
19625 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19626 }
19627
19628 *errp = 0;
19629 - return csum_partial_copy_generic(src, (void __force *)dst,
19630 +
19631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19632 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19633 + dst += PAX_USER_SHADOW_BASE;
19634 +#endif
19635 +
19636 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19637 len, isum, NULL, errp);
19638 }
19639 EXPORT_SYMBOL(csum_partial_copy_to_user);
19640 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19641 index 51f1504..ddac4c1 100644
19642 --- a/arch/x86/lib/getuser.S
19643 +++ b/arch/x86/lib/getuser.S
19644 @@ -33,15 +33,38 @@
19645 #include <asm/asm-offsets.h>
19646 #include <asm/thread_info.h>
19647 #include <asm/asm.h>
19648 +#include <asm/segment.h>
19649 +#include <asm/pgtable.h>
19650 +#include <asm/alternative-asm.h>
19651 +
19652 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19653 +#define __copyuser_seg gs;
19654 +#else
19655 +#define __copyuser_seg
19656 +#endif
19657
19658 .text
19659 ENTRY(__get_user_1)
19660 CFI_STARTPROC
19661 +
19662 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19663 GET_THREAD_INFO(%_ASM_DX)
19664 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19665 jae bad_get_user
19666 -1: movzb (%_ASM_AX),%edx
19667 +
19668 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19669 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19670 + cmp %_ASM_DX,%_ASM_AX
19671 + jae 1234f
19672 + add %_ASM_DX,%_ASM_AX
19673 +1234:
19674 +#endif
19675 +
19676 +#endif
19677 +
19678 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19679 xor %eax,%eax
19680 + pax_force_retaddr
19681 ret
19682 CFI_ENDPROC
19683 ENDPROC(__get_user_1)
19684 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19685 ENTRY(__get_user_2)
19686 CFI_STARTPROC
19687 add $1,%_ASM_AX
19688 +
19689 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19690 jc bad_get_user
19691 GET_THREAD_INFO(%_ASM_DX)
19692 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19693 jae bad_get_user
19694 -2: movzwl -1(%_ASM_AX),%edx
19695 +
19696 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19697 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19698 + cmp %_ASM_DX,%_ASM_AX
19699 + jae 1234f
19700 + add %_ASM_DX,%_ASM_AX
19701 +1234:
19702 +#endif
19703 +
19704 +#endif
19705 +
19706 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19707 xor %eax,%eax
19708 + pax_force_retaddr
19709 ret
19710 CFI_ENDPROC
19711 ENDPROC(__get_user_2)
19712 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19713 ENTRY(__get_user_4)
19714 CFI_STARTPROC
19715 add $3,%_ASM_AX
19716 +
19717 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19718 jc bad_get_user
19719 GET_THREAD_INFO(%_ASM_DX)
19720 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19721 jae bad_get_user
19722 -3: mov -3(%_ASM_AX),%edx
19723 +
19724 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19725 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19726 + cmp %_ASM_DX,%_ASM_AX
19727 + jae 1234f
19728 + add %_ASM_DX,%_ASM_AX
19729 +1234:
19730 +#endif
19731 +
19732 +#endif
19733 +
19734 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19735 xor %eax,%eax
19736 + pax_force_retaddr
19737 ret
19738 CFI_ENDPROC
19739 ENDPROC(__get_user_4)
19740 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19741 GET_THREAD_INFO(%_ASM_DX)
19742 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19743 jae bad_get_user
19744 +
19745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19746 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19747 + cmp %_ASM_DX,%_ASM_AX
19748 + jae 1234f
19749 + add %_ASM_DX,%_ASM_AX
19750 +1234:
19751 +#endif
19752 +
19753 4: movq -7(%_ASM_AX),%_ASM_DX
19754 xor %eax,%eax
19755 + pax_force_retaddr
19756 ret
19757 CFI_ENDPROC
19758 ENDPROC(__get_user_8)
19759 @@ -91,6 +152,7 @@ bad_get_user:
19760 CFI_STARTPROC
19761 xor %edx,%edx
19762 mov $(-EFAULT),%_ASM_AX
19763 + pax_force_retaddr
19764 ret
19765 CFI_ENDPROC
19766 END(bad_get_user)
19767 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19768 index 374562e..a75830b 100644
19769 --- a/arch/x86/lib/insn.c
19770 +++ b/arch/x86/lib/insn.c
19771 @@ -21,6 +21,11 @@
19772 #include <linux/string.h>
19773 #include <asm/inat.h>
19774 #include <asm/insn.h>
19775 +#ifdef __KERNEL__
19776 +#include <asm/pgtable_types.h>
19777 +#else
19778 +#define ktla_ktva(addr) addr
19779 +#endif
19780
19781 /* Verify next sizeof(t) bytes can be on the same instruction */
19782 #define validate_next(t, insn, n) \
19783 @@ -49,8 +54,8 @@
19784 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19785 {
19786 memset(insn, 0, sizeof(*insn));
19787 - insn->kaddr = kaddr;
19788 - insn->next_byte = kaddr;
19789 + insn->kaddr = ktla_ktva(kaddr);
19790 + insn->next_byte = ktla_ktva(kaddr);
19791 insn->x86_64 = x86_64 ? 1 : 0;
19792 insn->opnd_bytes = 4;
19793 if (x86_64)
19794 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19795 index 05a95e7..326f2fa 100644
19796 --- a/arch/x86/lib/iomap_copy_64.S
19797 +++ b/arch/x86/lib/iomap_copy_64.S
19798 @@ -17,6 +17,7 @@
19799
19800 #include <linux/linkage.h>
19801 #include <asm/dwarf2.h>
19802 +#include <asm/alternative-asm.h>
19803
19804 /*
19805 * override generic version in lib/iomap_copy.c
19806 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19807 CFI_STARTPROC
19808 movl %edx,%ecx
19809 rep movsd
19810 + pax_force_retaddr
19811 ret
19812 CFI_ENDPROC
19813 ENDPROC(__iowrite32_copy)
19814 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19815 index efbf2a0..8893637 100644
19816 --- a/arch/x86/lib/memcpy_64.S
19817 +++ b/arch/x86/lib/memcpy_64.S
19818 @@ -34,6 +34,7 @@
19819 rep movsq
19820 movl %edx, %ecx
19821 rep movsb
19822 + pax_force_retaddr
19823 ret
19824 .Lmemcpy_e:
19825 .previous
19826 @@ -51,6 +52,7 @@
19827
19828 movl %edx, %ecx
19829 rep movsb
19830 + pax_force_retaddr
19831 ret
19832 .Lmemcpy_e_e:
19833 .previous
19834 @@ -81,13 +83,13 @@ ENTRY(memcpy)
19835 */
19836 movq 0*8(%rsi), %r8
19837 movq 1*8(%rsi), %r9
19838 - movq 2*8(%rsi), %r10
19839 + movq 2*8(%rsi), %rcx
19840 movq 3*8(%rsi), %r11
19841 leaq 4*8(%rsi), %rsi
19842
19843 movq %r8, 0*8(%rdi)
19844 movq %r9, 1*8(%rdi)
19845 - movq %r10, 2*8(%rdi)
19846 + movq %rcx, 2*8(%rdi)
19847 movq %r11, 3*8(%rdi)
19848 leaq 4*8(%rdi), %rdi
19849 jae .Lcopy_forward_loop
19850 @@ -110,12 +112,12 @@ ENTRY(memcpy)
19851 subq $0x20, %rdx
19852 movq -1*8(%rsi), %r8
19853 movq -2*8(%rsi), %r9
19854 - movq -3*8(%rsi), %r10
19855 + movq -3*8(%rsi), %rcx
19856 movq -4*8(%rsi), %r11
19857 leaq -4*8(%rsi), %rsi
19858 movq %r8, -1*8(%rdi)
19859 movq %r9, -2*8(%rdi)
19860 - movq %r10, -3*8(%rdi)
19861 + movq %rcx, -3*8(%rdi)
19862 movq %r11, -4*8(%rdi)
19863 leaq -4*8(%rdi), %rdi
19864 jae .Lcopy_backward_loop
19865 @@ -135,12 +137,13 @@ ENTRY(memcpy)
19866 */
19867 movq 0*8(%rsi), %r8
19868 movq 1*8(%rsi), %r9
19869 - movq -2*8(%rsi, %rdx), %r10
19870 + movq -2*8(%rsi, %rdx), %rcx
19871 movq -1*8(%rsi, %rdx), %r11
19872 movq %r8, 0*8(%rdi)
19873 movq %r9, 1*8(%rdi)
19874 - movq %r10, -2*8(%rdi, %rdx)
19875 + movq %rcx, -2*8(%rdi, %rdx)
19876 movq %r11, -1*8(%rdi, %rdx)
19877 + pax_force_retaddr
19878 retq
19879 .p2align 4
19880 .Lless_16bytes:
19881 @@ -153,6 +156,7 @@ ENTRY(memcpy)
19882 movq -1*8(%rsi, %rdx), %r9
19883 movq %r8, 0*8(%rdi)
19884 movq %r9, -1*8(%rdi, %rdx)
19885 + pax_force_retaddr
19886 retq
19887 .p2align 4
19888 .Lless_8bytes:
19889 @@ -166,6 +170,7 @@ ENTRY(memcpy)
19890 movl -4(%rsi, %rdx), %r8d
19891 movl %ecx, (%rdi)
19892 movl %r8d, -4(%rdi, %rdx)
19893 + pax_force_retaddr
19894 retq
19895 .p2align 4
19896 .Lless_3bytes:
19897 @@ -183,6 +188,7 @@ ENTRY(memcpy)
19898 jnz .Lloop_1
19899
19900 .Lend:
19901 + pax_force_retaddr
19902 retq
19903 CFI_ENDPROC
19904 ENDPROC(memcpy)
19905 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19906 index ee16461..c39c199 100644
19907 --- a/arch/x86/lib/memmove_64.S
19908 +++ b/arch/x86/lib/memmove_64.S
19909 @@ -61,13 +61,13 @@ ENTRY(memmove)
19910 5:
19911 sub $0x20, %rdx
19912 movq 0*8(%rsi), %r11
19913 - movq 1*8(%rsi), %r10
19914 + movq 1*8(%rsi), %rcx
19915 movq 2*8(%rsi), %r9
19916 movq 3*8(%rsi), %r8
19917 leaq 4*8(%rsi), %rsi
19918
19919 movq %r11, 0*8(%rdi)
19920 - movq %r10, 1*8(%rdi)
19921 + movq %rcx, 1*8(%rdi)
19922 movq %r9, 2*8(%rdi)
19923 movq %r8, 3*8(%rdi)
19924 leaq 4*8(%rdi), %rdi
19925 @@ -81,10 +81,10 @@ ENTRY(memmove)
19926 4:
19927 movq %rdx, %rcx
19928 movq -8(%rsi, %rdx), %r11
19929 - lea -8(%rdi, %rdx), %r10
19930 + lea -8(%rdi, %rdx), %r9
19931 shrq $3, %rcx
19932 rep movsq
19933 - movq %r11, (%r10)
19934 + movq %r11, (%r9)
19935 jmp 13f
19936 .Lmemmove_end_forward:
19937
19938 @@ -95,14 +95,14 @@ ENTRY(memmove)
19939 7:
19940 movq %rdx, %rcx
19941 movq (%rsi), %r11
19942 - movq %rdi, %r10
19943 + movq %rdi, %r9
19944 leaq -8(%rsi, %rdx), %rsi
19945 leaq -8(%rdi, %rdx), %rdi
19946 shrq $3, %rcx
19947 std
19948 rep movsq
19949 cld
19950 - movq %r11, (%r10)
19951 + movq %r11, (%r9)
19952 jmp 13f
19953
19954 /*
19955 @@ -127,13 +127,13 @@ ENTRY(memmove)
19956 8:
19957 subq $0x20, %rdx
19958 movq -1*8(%rsi), %r11
19959 - movq -2*8(%rsi), %r10
19960 + movq -2*8(%rsi), %rcx
19961 movq -3*8(%rsi), %r9
19962 movq -4*8(%rsi), %r8
19963 leaq -4*8(%rsi), %rsi
19964
19965 movq %r11, -1*8(%rdi)
19966 - movq %r10, -2*8(%rdi)
19967 + movq %rcx, -2*8(%rdi)
19968 movq %r9, -3*8(%rdi)
19969 movq %r8, -4*8(%rdi)
19970 leaq -4*8(%rdi), %rdi
19971 @@ -151,11 +151,11 @@ ENTRY(memmove)
19972 * Move data from 16 bytes to 31 bytes.
19973 */
19974 movq 0*8(%rsi), %r11
19975 - movq 1*8(%rsi), %r10
19976 + movq 1*8(%rsi), %rcx
19977 movq -2*8(%rsi, %rdx), %r9
19978 movq -1*8(%rsi, %rdx), %r8
19979 movq %r11, 0*8(%rdi)
19980 - movq %r10, 1*8(%rdi)
19981 + movq %rcx, 1*8(%rdi)
19982 movq %r9, -2*8(%rdi, %rdx)
19983 movq %r8, -1*8(%rdi, %rdx)
19984 jmp 13f
19985 @@ -167,9 +167,9 @@ ENTRY(memmove)
19986 * Move data from 8 bytes to 15 bytes.
19987 */
19988 movq 0*8(%rsi), %r11
19989 - movq -1*8(%rsi, %rdx), %r10
19990 + movq -1*8(%rsi, %rdx), %r9
19991 movq %r11, 0*8(%rdi)
19992 - movq %r10, -1*8(%rdi, %rdx)
19993 + movq %r9, -1*8(%rdi, %rdx)
19994 jmp 13f
19995 10:
19996 cmpq $4, %rdx
19997 @@ -178,9 +178,9 @@ ENTRY(memmove)
19998 * Move data from 4 bytes to 7 bytes.
19999 */
20000 movl (%rsi), %r11d
20001 - movl -4(%rsi, %rdx), %r10d
20002 + movl -4(%rsi, %rdx), %r9d
20003 movl %r11d, (%rdi)
20004 - movl %r10d, -4(%rdi, %rdx)
20005 + movl %r9d, -4(%rdi, %rdx)
20006 jmp 13f
20007 11:
20008 cmp $2, %rdx
20009 @@ -189,9 +189,9 @@ ENTRY(memmove)
20010 * Move data from 2 bytes to 3 bytes.
20011 */
20012 movw (%rsi), %r11w
20013 - movw -2(%rsi, %rdx), %r10w
20014 + movw -2(%rsi, %rdx), %r9w
20015 movw %r11w, (%rdi)
20016 - movw %r10w, -2(%rdi, %rdx)
20017 + movw %r9w, -2(%rdi, %rdx)
20018 jmp 13f
20019 12:
20020 cmp $1, %rdx
20021 @@ -202,6 +202,7 @@ ENTRY(memmove)
20022 movb (%rsi), %r11b
20023 movb %r11b, (%rdi)
20024 13:
20025 + pax_force_retaddr
20026 retq
20027 CFI_ENDPROC
20028
20029 @@ -210,6 +211,7 @@ ENTRY(memmove)
20030 /* Forward moving data. */
20031 movq %rdx, %rcx
20032 rep movsb
20033 + pax_force_retaddr
20034 retq
20035 .Lmemmove_end_forward_efs:
20036 .previous
20037 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20038 index 79bd454..dff325a 100644
20039 --- a/arch/x86/lib/memset_64.S
20040 +++ b/arch/x86/lib/memset_64.S
20041 @@ -31,6 +31,7 @@
20042 movl %r8d,%ecx
20043 rep stosb
20044 movq %r9,%rax
20045 + pax_force_retaddr
20046 ret
20047 .Lmemset_e:
20048 .previous
20049 @@ -53,6 +54,7 @@
20050 movl %edx,%ecx
20051 rep stosb
20052 movq %r9,%rax
20053 + pax_force_retaddr
20054 ret
20055 .Lmemset_e_e:
20056 .previous
20057 @@ -60,13 +62,13 @@
20058 ENTRY(memset)
20059 ENTRY(__memset)
20060 CFI_STARTPROC
20061 - movq %rdi,%r10
20062 movq %rdx,%r11
20063
20064 /* expand byte value */
20065 movzbl %sil,%ecx
20066 movabs $0x0101010101010101,%rax
20067 mul %rcx /* with rax, clobbers rdx */
20068 + movq %rdi,%rdx
20069
20070 /* align dst */
20071 movl %edi,%r9d
20072 @@ -120,7 +122,8 @@ ENTRY(__memset)
20073 jnz .Lloop_1
20074
20075 .Lende:
20076 - movq %r10,%rax
20077 + movq %rdx,%rax
20078 + pax_force_retaddr
20079 ret
20080
20081 CFI_RESTORE_STATE
20082 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20083 index c9f2d9b..e7fd2c0 100644
20084 --- a/arch/x86/lib/mmx_32.c
20085 +++ b/arch/x86/lib/mmx_32.c
20086 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20087 {
20088 void *p;
20089 int i;
20090 + unsigned long cr0;
20091
20092 if (unlikely(in_interrupt()))
20093 return __memcpy(to, from, len);
20094 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20095 kernel_fpu_begin();
20096
20097 __asm__ __volatile__ (
20098 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20099 - " prefetch 64(%0)\n"
20100 - " prefetch 128(%0)\n"
20101 - " prefetch 192(%0)\n"
20102 - " prefetch 256(%0)\n"
20103 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20104 + " prefetch 64(%1)\n"
20105 + " prefetch 128(%1)\n"
20106 + " prefetch 192(%1)\n"
20107 + " prefetch 256(%1)\n"
20108 "2: \n"
20109 ".section .fixup, \"ax\"\n"
20110 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20111 + "3: \n"
20112 +
20113 +#ifdef CONFIG_PAX_KERNEXEC
20114 + " movl %%cr0, %0\n"
20115 + " movl %0, %%eax\n"
20116 + " andl $0xFFFEFFFF, %%eax\n"
20117 + " movl %%eax, %%cr0\n"
20118 +#endif
20119 +
20120 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20121 +
20122 +#ifdef CONFIG_PAX_KERNEXEC
20123 + " movl %0, %%cr0\n"
20124 +#endif
20125 +
20126 " jmp 2b\n"
20127 ".previous\n"
20128 _ASM_EXTABLE(1b, 3b)
20129 - : : "r" (from));
20130 + : "=&r" (cr0) : "r" (from) : "ax");
20131
20132 for ( ; i > 5; i--) {
20133 __asm__ __volatile__ (
20134 - "1: prefetch 320(%0)\n"
20135 - "2: movq (%0), %%mm0\n"
20136 - " movq 8(%0), %%mm1\n"
20137 - " movq 16(%0), %%mm2\n"
20138 - " movq 24(%0), %%mm3\n"
20139 - " movq %%mm0, (%1)\n"
20140 - " movq %%mm1, 8(%1)\n"
20141 - " movq %%mm2, 16(%1)\n"
20142 - " movq %%mm3, 24(%1)\n"
20143 - " movq 32(%0), %%mm0\n"
20144 - " movq 40(%0), %%mm1\n"
20145 - " movq 48(%0), %%mm2\n"
20146 - " movq 56(%0), %%mm3\n"
20147 - " movq %%mm0, 32(%1)\n"
20148 - " movq %%mm1, 40(%1)\n"
20149 - " movq %%mm2, 48(%1)\n"
20150 - " movq %%mm3, 56(%1)\n"
20151 + "1: prefetch 320(%1)\n"
20152 + "2: movq (%1), %%mm0\n"
20153 + " movq 8(%1), %%mm1\n"
20154 + " movq 16(%1), %%mm2\n"
20155 + " movq 24(%1), %%mm3\n"
20156 + " movq %%mm0, (%2)\n"
20157 + " movq %%mm1, 8(%2)\n"
20158 + " movq %%mm2, 16(%2)\n"
20159 + " movq %%mm3, 24(%2)\n"
20160 + " movq 32(%1), %%mm0\n"
20161 + " movq 40(%1), %%mm1\n"
20162 + " movq 48(%1), %%mm2\n"
20163 + " movq 56(%1), %%mm3\n"
20164 + " movq %%mm0, 32(%2)\n"
20165 + " movq %%mm1, 40(%2)\n"
20166 + " movq %%mm2, 48(%2)\n"
20167 + " movq %%mm3, 56(%2)\n"
20168 ".section .fixup, \"ax\"\n"
20169 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20170 + "3:\n"
20171 +
20172 +#ifdef CONFIG_PAX_KERNEXEC
20173 + " movl %%cr0, %0\n"
20174 + " movl %0, %%eax\n"
20175 + " andl $0xFFFEFFFF, %%eax\n"
20176 + " movl %%eax, %%cr0\n"
20177 +#endif
20178 +
20179 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20180 +
20181 +#ifdef CONFIG_PAX_KERNEXEC
20182 + " movl %0, %%cr0\n"
20183 +#endif
20184 +
20185 " jmp 2b\n"
20186 ".previous\n"
20187 _ASM_EXTABLE(1b, 3b)
20188 - : : "r" (from), "r" (to) : "memory");
20189 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20190
20191 from += 64;
20192 to += 64;
20193 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20194 static void fast_copy_page(void *to, void *from)
20195 {
20196 int i;
20197 + unsigned long cr0;
20198
20199 kernel_fpu_begin();
20200
20201 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20202 * but that is for later. -AV
20203 */
20204 __asm__ __volatile__(
20205 - "1: prefetch (%0)\n"
20206 - " prefetch 64(%0)\n"
20207 - " prefetch 128(%0)\n"
20208 - " prefetch 192(%0)\n"
20209 - " prefetch 256(%0)\n"
20210 + "1: prefetch (%1)\n"
20211 + " prefetch 64(%1)\n"
20212 + " prefetch 128(%1)\n"
20213 + " prefetch 192(%1)\n"
20214 + " prefetch 256(%1)\n"
20215 "2: \n"
20216 ".section .fixup, \"ax\"\n"
20217 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20218 + "3: \n"
20219 +
20220 +#ifdef CONFIG_PAX_KERNEXEC
20221 + " movl %%cr0, %0\n"
20222 + " movl %0, %%eax\n"
20223 + " andl $0xFFFEFFFF, %%eax\n"
20224 + " movl %%eax, %%cr0\n"
20225 +#endif
20226 +
20227 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20228 +
20229 +#ifdef CONFIG_PAX_KERNEXEC
20230 + " movl %0, %%cr0\n"
20231 +#endif
20232 +
20233 " jmp 2b\n"
20234 ".previous\n"
20235 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20236 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20237
20238 for (i = 0; i < (4096-320)/64; i++) {
20239 __asm__ __volatile__ (
20240 - "1: prefetch 320(%0)\n"
20241 - "2: movq (%0), %%mm0\n"
20242 - " movntq %%mm0, (%1)\n"
20243 - " movq 8(%0), %%mm1\n"
20244 - " movntq %%mm1, 8(%1)\n"
20245 - " movq 16(%0), %%mm2\n"
20246 - " movntq %%mm2, 16(%1)\n"
20247 - " movq 24(%0), %%mm3\n"
20248 - " movntq %%mm3, 24(%1)\n"
20249 - " movq 32(%0), %%mm4\n"
20250 - " movntq %%mm4, 32(%1)\n"
20251 - " movq 40(%0), %%mm5\n"
20252 - " movntq %%mm5, 40(%1)\n"
20253 - " movq 48(%0), %%mm6\n"
20254 - " movntq %%mm6, 48(%1)\n"
20255 - " movq 56(%0), %%mm7\n"
20256 - " movntq %%mm7, 56(%1)\n"
20257 + "1: prefetch 320(%1)\n"
20258 + "2: movq (%1), %%mm0\n"
20259 + " movntq %%mm0, (%2)\n"
20260 + " movq 8(%1), %%mm1\n"
20261 + " movntq %%mm1, 8(%2)\n"
20262 + " movq 16(%1), %%mm2\n"
20263 + " movntq %%mm2, 16(%2)\n"
20264 + " movq 24(%1), %%mm3\n"
20265 + " movntq %%mm3, 24(%2)\n"
20266 + " movq 32(%1), %%mm4\n"
20267 + " movntq %%mm4, 32(%2)\n"
20268 + " movq 40(%1), %%mm5\n"
20269 + " movntq %%mm5, 40(%2)\n"
20270 + " movq 48(%1), %%mm6\n"
20271 + " movntq %%mm6, 48(%2)\n"
20272 + " movq 56(%1), %%mm7\n"
20273 + " movntq %%mm7, 56(%2)\n"
20274 ".section .fixup, \"ax\"\n"
20275 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20276 + "3:\n"
20277 +
20278 +#ifdef CONFIG_PAX_KERNEXEC
20279 + " movl %%cr0, %0\n"
20280 + " movl %0, %%eax\n"
20281 + " andl $0xFFFEFFFF, %%eax\n"
20282 + " movl %%eax, %%cr0\n"
20283 +#endif
20284 +
20285 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20286 +
20287 +#ifdef CONFIG_PAX_KERNEXEC
20288 + " movl %0, %%cr0\n"
20289 +#endif
20290 +
20291 " jmp 2b\n"
20292 ".previous\n"
20293 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20294 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20295
20296 from += 64;
20297 to += 64;
20298 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20299 static void fast_copy_page(void *to, void *from)
20300 {
20301 int i;
20302 + unsigned long cr0;
20303
20304 kernel_fpu_begin();
20305
20306 __asm__ __volatile__ (
20307 - "1: prefetch (%0)\n"
20308 - " prefetch 64(%0)\n"
20309 - " prefetch 128(%0)\n"
20310 - " prefetch 192(%0)\n"
20311 - " prefetch 256(%0)\n"
20312 + "1: prefetch (%1)\n"
20313 + " prefetch 64(%1)\n"
20314 + " prefetch 128(%1)\n"
20315 + " prefetch 192(%1)\n"
20316 + " prefetch 256(%1)\n"
20317 "2: \n"
20318 ".section .fixup, \"ax\"\n"
20319 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20320 + "3: \n"
20321 +
20322 +#ifdef CONFIG_PAX_KERNEXEC
20323 + " movl %%cr0, %0\n"
20324 + " movl %0, %%eax\n"
20325 + " andl $0xFFFEFFFF, %%eax\n"
20326 + " movl %%eax, %%cr0\n"
20327 +#endif
20328 +
20329 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20330 +
20331 +#ifdef CONFIG_PAX_KERNEXEC
20332 + " movl %0, %%cr0\n"
20333 +#endif
20334 +
20335 " jmp 2b\n"
20336 ".previous\n"
20337 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20338 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20339
20340 for (i = 0; i < 4096/64; i++) {
20341 __asm__ __volatile__ (
20342 - "1: prefetch 320(%0)\n"
20343 - "2: movq (%0), %%mm0\n"
20344 - " movq 8(%0), %%mm1\n"
20345 - " movq 16(%0), %%mm2\n"
20346 - " movq 24(%0), %%mm3\n"
20347 - " movq %%mm0, (%1)\n"
20348 - " movq %%mm1, 8(%1)\n"
20349 - " movq %%mm2, 16(%1)\n"
20350 - " movq %%mm3, 24(%1)\n"
20351 - " movq 32(%0), %%mm0\n"
20352 - " movq 40(%0), %%mm1\n"
20353 - " movq 48(%0), %%mm2\n"
20354 - " movq 56(%0), %%mm3\n"
20355 - " movq %%mm0, 32(%1)\n"
20356 - " movq %%mm1, 40(%1)\n"
20357 - " movq %%mm2, 48(%1)\n"
20358 - " movq %%mm3, 56(%1)\n"
20359 + "1: prefetch 320(%1)\n"
20360 + "2: movq (%1), %%mm0\n"
20361 + " movq 8(%1), %%mm1\n"
20362 + " movq 16(%1), %%mm2\n"
20363 + " movq 24(%1), %%mm3\n"
20364 + " movq %%mm0, (%2)\n"
20365 + " movq %%mm1, 8(%2)\n"
20366 + " movq %%mm2, 16(%2)\n"
20367 + " movq %%mm3, 24(%2)\n"
20368 + " movq 32(%1), %%mm0\n"
20369 + " movq 40(%1), %%mm1\n"
20370 + " movq 48(%1), %%mm2\n"
20371 + " movq 56(%1), %%mm3\n"
20372 + " movq %%mm0, 32(%2)\n"
20373 + " movq %%mm1, 40(%2)\n"
20374 + " movq %%mm2, 48(%2)\n"
20375 + " movq %%mm3, 56(%2)\n"
20376 ".section .fixup, \"ax\"\n"
20377 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20378 + "3:\n"
20379 +
20380 +#ifdef CONFIG_PAX_KERNEXEC
20381 + " movl %%cr0, %0\n"
20382 + " movl %0, %%eax\n"
20383 + " andl $0xFFFEFFFF, %%eax\n"
20384 + " movl %%eax, %%cr0\n"
20385 +#endif
20386 +
20387 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20388 +
20389 +#ifdef CONFIG_PAX_KERNEXEC
20390 + " movl %0, %%cr0\n"
20391 +#endif
20392 +
20393 " jmp 2b\n"
20394 ".previous\n"
20395 _ASM_EXTABLE(1b, 3b)
20396 - : : "r" (from), "r" (to) : "memory");
20397 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20398
20399 from += 64;
20400 to += 64;
20401 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20402 index 69fa106..adda88b 100644
20403 --- a/arch/x86/lib/msr-reg.S
20404 +++ b/arch/x86/lib/msr-reg.S
20405 @@ -3,6 +3,7 @@
20406 #include <asm/dwarf2.h>
20407 #include <asm/asm.h>
20408 #include <asm/msr.h>
20409 +#include <asm/alternative-asm.h>
20410
20411 #ifdef CONFIG_X86_64
20412 /*
20413 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20414 CFI_STARTPROC
20415 pushq_cfi %rbx
20416 pushq_cfi %rbp
20417 - movq %rdi, %r10 /* Save pointer */
20418 + movq %rdi, %r9 /* Save pointer */
20419 xorl %r11d, %r11d /* Return value */
20420 movl (%rdi), %eax
20421 movl 4(%rdi), %ecx
20422 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20423 movl 28(%rdi), %edi
20424 CFI_REMEMBER_STATE
20425 1: \op
20426 -2: movl %eax, (%r10)
20427 +2: movl %eax, (%r9)
20428 movl %r11d, %eax /* Return value */
20429 - movl %ecx, 4(%r10)
20430 - movl %edx, 8(%r10)
20431 - movl %ebx, 12(%r10)
20432 - movl %ebp, 20(%r10)
20433 - movl %esi, 24(%r10)
20434 - movl %edi, 28(%r10)
20435 + movl %ecx, 4(%r9)
20436 + movl %edx, 8(%r9)
20437 + movl %ebx, 12(%r9)
20438 + movl %ebp, 20(%r9)
20439 + movl %esi, 24(%r9)
20440 + movl %edi, 28(%r9)
20441 popq_cfi %rbp
20442 popq_cfi %rbx
20443 + pax_force_retaddr
20444 ret
20445 3:
20446 CFI_RESTORE_STATE
20447 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20448 index 36b0d15..d381858 100644
20449 --- a/arch/x86/lib/putuser.S
20450 +++ b/arch/x86/lib/putuser.S
20451 @@ -15,7 +15,9 @@
20452 #include <asm/thread_info.h>
20453 #include <asm/errno.h>
20454 #include <asm/asm.h>
20455 -
20456 +#include <asm/segment.h>
20457 +#include <asm/pgtable.h>
20458 +#include <asm/alternative-asm.h>
20459
20460 /*
20461 * __put_user_X
20462 @@ -29,52 +31,119 @@
20463 * as they get called from within inline assembly.
20464 */
20465
20466 -#define ENTER CFI_STARTPROC ; \
20467 - GET_THREAD_INFO(%_ASM_BX)
20468 -#define EXIT ret ; \
20469 +#define ENTER CFI_STARTPROC
20470 +#define EXIT pax_force_retaddr; ret ; \
20471 CFI_ENDPROC
20472
20473 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20474 +#define _DEST %_ASM_CX,%_ASM_BX
20475 +#else
20476 +#define _DEST %_ASM_CX
20477 +#endif
20478 +
20479 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20480 +#define __copyuser_seg gs;
20481 +#else
20482 +#define __copyuser_seg
20483 +#endif
20484 +
20485 .text
20486 ENTRY(__put_user_1)
20487 ENTER
20488 +
20489 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20490 + GET_THREAD_INFO(%_ASM_BX)
20491 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20492 jae bad_put_user
20493 -1: movb %al,(%_ASM_CX)
20494 +
20495 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20496 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20497 + cmp %_ASM_BX,%_ASM_CX
20498 + jb 1234f
20499 + xor %ebx,%ebx
20500 +1234:
20501 +#endif
20502 +
20503 +#endif
20504 +
20505 +1: __copyuser_seg movb %al,(_DEST)
20506 xor %eax,%eax
20507 EXIT
20508 ENDPROC(__put_user_1)
20509
20510 ENTRY(__put_user_2)
20511 ENTER
20512 +
20513 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20514 + GET_THREAD_INFO(%_ASM_BX)
20515 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20516 sub $1,%_ASM_BX
20517 cmp %_ASM_BX,%_ASM_CX
20518 jae bad_put_user
20519 -2: movw %ax,(%_ASM_CX)
20520 +
20521 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20522 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20523 + cmp %_ASM_BX,%_ASM_CX
20524 + jb 1234f
20525 + xor %ebx,%ebx
20526 +1234:
20527 +#endif
20528 +
20529 +#endif
20530 +
20531 +2: __copyuser_seg movw %ax,(_DEST)
20532 xor %eax,%eax
20533 EXIT
20534 ENDPROC(__put_user_2)
20535
20536 ENTRY(__put_user_4)
20537 ENTER
20538 +
20539 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20540 + GET_THREAD_INFO(%_ASM_BX)
20541 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20542 sub $3,%_ASM_BX
20543 cmp %_ASM_BX,%_ASM_CX
20544 jae bad_put_user
20545 -3: movl %eax,(%_ASM_CX)
20546 +
20547 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20548 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20549 + cmp %_ASM_BX,%_ASM_CX
20550 + jb 1234f
20551 + xor %ebx,%ebx
20552 +1234:
20553 +#endif
20554 +
20555 +#endif
20556 +
20557 +3: __copyuser_seg movl %eax,(_DEST)
20558 xor %eax,%eax
20559 EXIT
20560 ENDPROC(__put_user_4)
20561
20562 ENTRY(__put_user_8)
20563 ENTER
20564 +
20565 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20566 + GET_THREAD_INFO(%_ASM_BX)
20567 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20568 sub $7,%_ASM_BX
20569 cmp %_ASM_BX,%_ASM_CX
20570 jae bad_put_user
20571 -4: mov %_ASM_AX,(%_ASM_CX)
20572 +
20573 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20574 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20575 + cmp %_ASM_BX,%_ASM_CX
20576 + jb 1234f
20577 + xor %ebx,%ebx
20578 +1234:
20579 +#endif
20580 +
20581 +#endif
20582 +
20583 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20584 #ifdef CONFIG_X86_32
20585 -5: movl %edx,4(%_ASM_CX)
20586 +5: __copyuser_seg movl %edx,4(_DEST)
20587 #endif
20588 xor %eax,%eax
20589 EXIT
20590 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20591 index 1cad221..de671ee 100644
20592 --- a/arch/x86/lib/rwlock.S
20593 +++ b/arch/x86/lib/rwlock.S
20594 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20595 FRAME
20596 0: LOCK_PREFIX
20597 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20598 +
20599 +#ifdef CONFIG_PAX_REFCOUNT
20600 + jno 1234f
20601 + LOCK_PREFIX
20602 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20603 + int $4
20604 +1234:
20605 + _ASM_EXTABLE(1234b, 1234b)
20606 +#endif
20607 +
20608 1: rep; nop
20609 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20610 jne 1b
20611 LOCK_PREFIX
20612 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20613 +
20614 +#ifdef CONFIG_PAX_REFCOUNT
20615 + jno 1234f
20616 + LOCK_PREFIX
20617 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20618 + int $4
20619 +1234:
20620 + _ASM_EXTABLE(1234b, 1234b)
20621 +#endif
20622 +
20623 jnz 0b
20624 ENDFRAME
20625 + pax_force_retaddr
20626 ret
20627 CFI_ENDPROC
20628 END(__write_lock_failed)
20629 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20630 FRAME
20631 0: LOCK_PREFIX
20632 READ_LOCK_SIZE(inc) (%__lock_ptr)
20633 +
20634 +#ifdef CONFIG_PAX_REFCOUNT
20635 + jno 1234f
20636 + LOCK_PREFIX
20637 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20638 + int $4
20639 +1234:
20640 + _ASM_EXTABLE(1234b, 1234b)
20641 +#endif
20642 +
20643 1: rep; nop
20644 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20645 js 1b
20646 LOCK_PREFIX
20647 READ_LOCK_SIZE(dec) (%__lock_ptr)
20648 +
20649 +#ifdef CONFIG_PAX_REFCOUNT
20650 + jno 1234f
20651 + LOCK_PREFIX
20652 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20653 + int $4
20654 +1234:
20655 + _ASM_EXTABLE(1234b, 1234b)
20656 +#endif
20657 +
20658 js 0b
20659 ENDFRAME
20660 + pax_force_retaddr
20661 ret
20662 CFI_ENDPROC
20663 END(__read_lock_failed)
20664 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20665 index 5dff5f0..cadebf4 100644
20666 --- a/arch/x86/lib/rwsem.S
20667 +++ b/arch/x86/lib/rwsem.S
20668 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20669 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20670 CFI_RESTORE __ASM_REG(dx)
20671 restore_common_regs
20672 + pax_force_retaddr
20673 ret
20674 CFI_ENDPROC
20675 ENDPROC(call_rwsem_down_read_failed)
20676 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20677 movq %rax,%rdi
20678 call rwsem_down_write_failed
20679 restore_common_regs
20680 + pax_force_retaddr
20681 ret
20682 CFI_ENDPROC
20683 ENDPROC(call_rwsem_down_write_failed)
20684 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20685 movq %rax,%rdi
20686 call rwsem_wake
20687 restore_common_regs
20688 -1: ret
20689 +1: pax_force_retaddr
20690 + ret
20691 CFI_ENDPROC
20692 ENDPROC(call_rwsem_wake)
20693
20694 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20695 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20696 CFI_RESTORE __ASM_REG(dx)
20697 restore_common_regs
20698 + pax_force_retaddr
20699 ret
20700 CFI_ENDPROC
20701 ENDPROC(call_rwsem_downgrade_wake)
20702 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20703 index a63efd6..ccecad8 100644
20704 --- a/arch/x86/lib/thunk_64.S
20705 +++ b/arch/x86/lib/thunk_64.S
20706 @@ -8,6 +8,7 @@
20707 #include <linux/linkage.h>
20708 #include <asm/dwarf2.h>
20709 #include <asm/calling.h>
20710 +#include <asm/alternative-asm.h>
20711
20712 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20713 .macro THUNK name, func, put_ret_addr_in_rdi=0
20714 @@ -41,5 +42,6 @@
20715 SAVE_ARGS
20716 restore:
20717 RESTORE_ARGS
20718 + pax_force_retaddr
20719 ret
20720 CFI_ENDPROC
20721 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20722 index e218d5d..35679b4 100644
20723 --- a/arch/x86/lib/usercopy_32.c
20724 +++ b/arch/x86/lib/usercopy_32.c
20725 @@ -43,7 +43,7 @@ do { \
20726 __asm__ __volatile__( \
20727 " testl %1,%1\n" \
20728 " jz 2f\n" \
20729 - "0: lodsb\n" \
20730 + "0: "__copyuser_seg"lodsb\n" \
20731 " stosb\n" \
20732 " testb %%al,%%al\n" \
20733 " jz 1f\n" \
20734 @@ -128,10 +128,12 @@ do { \
20735 int __d0; \
20736 might_fault(); \
20737 __asm__ __volatile__( \
20738 + __COPYUSER_SET_ES \
20739 "0: rep; stosl\n" \
20740 " movl %2,%0\n" \
20741 "1: rep; stosb\n" \
20742 "2:\n" \
20743 + __COPYUSER_RESTORE_ES \
20744 ".section .fixup,\"ax\"\n" \
20745 "3: lea 0(%2,%0,4),%0\n" \
20746 " jmp 2b\n" \
20747 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20748 might_fault();
20749
20750 __asm__ __volatile__(
20751 + __COPYUSER_SET_ES
20752 " testl %0, %0\n"
20753 " jz 3f\n"
20754 " andl %0,%%ecx\n"
20755 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20756 " subl %%ecx,%0\n"
20757 " addl %0,%%eax\n"
20758 "1:\n"
20759 + __COPYUSER_RESTORE_ES
20760 ".section .fixup,\"ax\"\n"
20761 "2: xorl %%eax,%%eax\n"
20762 " jmp 1b\n"
20763 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20764
20765 #ifdef CONFIG_X86_INTEL_USERCOPY
20766 static unsigned long
20767 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20768 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20769 {
20770 int d0, d1;
20771 __asm__ __volatile__(
20772 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20773 " .align 2,0x90\n"
20774 "3: movl 0(%4), %%eax\n"
20775 "4: movl 4(%4), %%edx\n"
20776 - "5: movl %%eax, 0(%3)\n"
20777 - "6: movl %%edx, 4(%3)\n"
20778 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20779 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20780 "7: movl 8(%4), %%eax\n"
20781 "8: movl 12(%4),%%edx\n"
20782 - "9: movl %%eax, 8(%3)\n"
20783 - "10: movl %%edx, 12(%3)\n"
20784 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20785 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20786 "11: movl 16(%4), %%eax\n"
20787 "12: movl 20(%4), %%edx\n"
20788 - "13: movl %%eax, 16(%3)\n"
20789 - "14: movl %%edx, 20(%3)\n"
20790 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20791 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20792 "15: movl 24(%4), %%eax\n"
20793 "16: movl 28(%4), %%edx\n"
20794 - "17: movl %%eax, 24(%3)\n"
20795 - "18: movl %%edx, 28(%3)\n"
20796 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20797 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20798 "19: movl 32(%4), %%eax\n"
20799 "20: movl 36(%4), %%edx\n"
20800 - "21: movl %%eax, 32(%3)\n"
20801 - "22: movl %%edx, 36(%3)\n"
20802 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20803 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20804 "23: movl 40(%4), %%eax\n"
20805 "24: movl 44(%4), %%edx\n"
20806 - "25: movl %%eax, 40(%3)\n"
20807 - "26: movl %%edx, 44(%3)\n"
20808 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20809 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20810 "27: movl 48(%4), %%eax\n"
20811 "28: movl 52(%4), %%edx\n"
20812 - "29: movl %%eax, 48(%3)\n"
20813 - "30: movl %%edx, 52(%3)\n"
20814 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20815 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20816 "31: movl 56(%4), %%eax\n"
20817 "32: movl 60(%4), %%edx\n"
20818 - "33: movl %%eax, 56(%3)\n"
20819 - "34: movl %%edx, 60(%3)\n"
20820 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20821 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20822 " addl $-64, %0\n"
20823 " addl $64, %4\n"
20824 " addl $64, %3\n"
20825 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20826 " shrl $2, %0\n"
20827 " andl $3, %%eax\n"
20828 " cld\n"
20829 + __COPYUSER_SET_ES
20830 "99: rep; movsl\n"
20831 "36: movl %%eax, %0\n"
20832 "37: rep; movsb\n"
20833 "100:\n"
20834 + __COPYUSER_RESTORE_ES
20835 + ".section .fixup,\"ax\"\n"
20836 + "101: lea 0(%%eax,%0,4),%0\n"
20837 + " jmp 100b\n"
20838 + ".previous\n"
20839 + ".section __ex_table,\"a\"\n"
20840 + " .align 4\n"
20841 + " .long 1b,100b\n"
20842 + " .long 2b,100b\n"
20843 + " .long 3b,100b\n"
20844 + " .long 4b,100b\n"
20845 + " .long 5b,100b\n"
20846 + " .long 6b,100b\n"
20847 + " .long 7b,100b\n"
20848 + " .long 8b,100b\n"
20849 + " .long 9b,100b\n"
20850 + " .long 10b,100b\n"
20851 + " .long 11b,100b\n"
20852 + " .long 12b,100b\n"
20853 + " .long 13b,100b\n"
20854 + " .long 14b,100b\n"
20855 + " .long 15b,100b\n"
20856 + " .long 16b,100b\n"
20857 + " .long 17b,100b\n"
20858 + " .long 18b,100b\n"
20859 + " .long 19b,100b\n"
20860 + " .long 20b,100b\n"
20861 + " .long 21b,100b\n"
20862 + " .long 22b,100b\n"
20863 + " .long 23b,100b\n"
20864 + " .long 24b,100b\n"
20865 + " .long 25b,100b\n"
20866 + " .long 26b,100b\n"
20867 + " .long 27b,100b\n"
20868 + " .long 28b,100b\n"
20869 + " .long 29b,100b\n"
20870 + " .long 30b,100b\n"
20871 + " .long 31b,100b\n"
20872 + " .long 32b,100b\n"
20873 + " .long 33b,100b\n"
20874 + " .long 34b,100b\n"
20875 + " .long 35b,100b\n"
20876 + " .long 36b,100b\n"
20877 + " .long 37b,100b\n"
20878 + " .long 99b,101b\n"
20879 + ".previous"
20880 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20881 + : "1"(to), "2"(from), "0"(size)
20882 + : "eax", "edx", "memory");
20883 + return size;
20884 +}
20885 +
20886 +static unsigned long
20887 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20888 +{
20889 + int d0, d1;
20890 + __asm__ __volatile__(
20891 + " .align 2,0x90\n"
20892 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20893 + " cmpl $67, %0\n"
20894 + " jbe 3f\n"
20895 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20896 + " .align 2,0x90\n"
20897 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20898 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20899 + "5: movl %%eax, 0(%3)\n"
20900 + "6: movl %%edx, 4(%3)\n"
20901 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20902 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20903 + "9: movl %%eax, 8(%3)\n"
20904 + "10: movl %%edx, 12(%3)\n"
20905 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20906 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20907 + "13: movl %%eax, 16(%3)\n"
20908 + "14: movl %%edx, 20(%3)\n"
20909 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20910 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20911 + "17: movl %%eax, 24(%3)\n"
20912 + "18: movl %%edx, 28(%3)\n"
20913 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20914 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20915 + "21: movl %%eax, 32(%3)\n"
20916 + "22: movl %%edx, 36(%3)\n"
20917 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20918 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20919 + "25: movl %%eax, 40(%3)\n"
20920 + "26: movl %%edx, 44(%3)\n"
20921 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20922 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20923 + "29: movl %%eax, 48(%3)\n"
20924 + "30: movl %%edx, 52(%3)\n"
20925 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20926 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20927 + "33: movl %%eax, 56(%3)\n"
20928 + "34: movl %%edx, 60(%3)\n"
20929 + " addl $-64, %0\n"
20930 + " addl $64, %4\n"
20931 + " addl $64, %3\n"
20932 + " cmpl $63, %0\n"
20933 + " ja 1b\n"
20934 + "35: movl %0, %%eax\n"
20935 + " shrl $2, %0\n"
20936 + " andl $3, %%eax\n"
20937 + " cld\n"
20938 + "99: rep; "__copyuser_seg" movsl\n"
20939 + "36: movl %%eax, %0\n"
20940 + "37: rep; "__copyuser_seg" movsb\n"
20941 + "100:\n"
20942 ".section .fixup,\"ax\"\n"
20943 "101: lea 0(%%eax,%0,4),%0\n"
20944 " jmp 100b\n"
20945 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20946 int d0, d1;
20947 __asm__ __volatile__(
20948 " .align 2,0x90\n"
20949 - "0: movl 32(%4), %%eax\n"
20950 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20951 " cmpl $67, %0\n"
20952 " jbe 2f\n"
20953 - "1: movl 64(%4), %%eax\n"
20954 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20955 " .align 2,0x90\n"
20956 - "2: movl 0(%4), %%eax\n"
20957 - "21: movl 4(%4), %%edx\n"
20958 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20959 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20960 " movl %%eax, 0(%3)\n"
20961 " movl %%edx, 4(%3)\n"
20962 - "3: movl 8(%4), %%eax\n"
20963 - "31: movl 12(%4),%%edx\n"
20964 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20965 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20966 " movl %%eax, 8(%3)\n"
20967 " movl %%edx, 12(%3)\n"
20968 - "4: movl 16(%4), %%eax\n"
20969 - "41: movl 20(%4), %%edx\n"
20970 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20971 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20972 " movl %%eax, 16(%3)\n"
20973 " movl %%edx, 20(%3)\n"
20974 - "10: movl 24(%4), %%eax\n"
20975 - "51: movl 28(%4), %%edx\n"
20976 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20977 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20978 " movl %%eax, 24(%3)\n"
20979 " movl %%edx, 28(%3)\n"
20980 - "11: movl 32(%4), %%eax\n"
20981 - "61: movl 36(%4), %%edx\n"
20982 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20983 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20984 " movl %%eax, 32(%3)\n"
20985 " movl %%edx, 36(%3)\n"
20986 - "12: movl 40(%4), %%eax\n"
20987 - "71: movl 44(%4), %%edx\n"
20988 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20989 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20990 " movl %%eax, 40(%3)\n"
20991 " movl %%edx, 44(%3)\n"
20992 - "13: movl 48(%4), %%eax\n"
20993 - "81: movl 52(%4), %%edx\n"
20994 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20995 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20996 " movl %%eax, 48(%3)\n"
20997 " movl %%edx, 52(%3)\n"
20998 - "14: movl 56(%4), %%eax\n"
20999 - "91: movl 60(%4), %%edx\n"
21000 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21001 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21002 " movl %%eax, 56(%3)\n"
21003 " movl %%edx, 60(%3)\n"
21004 " addl $-64, %0\n"
21005 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21006 " shrl $2, %0\n"
21007 " andl $3, %%eax\n"
21008 " cld\n"
21009 - "6: rep; movsl\n"
21010 + "6: rep; "__copyuser_seg" movsl\n"
21011 " movl %%eax,%0\n"
21012 - "7: rep; movsb\n"
21013 + "7: rep; "__copyuser_seg" movsb\n"
21014 "8:\n"
21015 ".section .fixup,\"ax\"\n"
21016 "9: lea 0(%%eax,%0,4),%0\n"
21017 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21018
21019 __asm__ __volatile__(
21020 " .align 2,0x90\n"
21021 - "0: movl 32(%4), %%eax\n"
21022 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21023 " cmpl $67, %0\n"
21024 " jbe 2f\n"
21025 - "1: movl 64(%4), %%eax\n"
21026 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21027 " .align 2,0x90\n"
21028 - "2: movl 0(%4), %%eax\n"
21029 - "21: movl 4(%4), %%edx\n"
21030 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21031 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21032 " movnti %%eax, 0(%3)\n"
21033 " movnti %%edx, 4(%3)\n"
21034 - "3: movl 8(%4), %%eax\n"
21035 - "31: movl 12(%4),%%edx\n"
21036 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21037 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21038 " movnti %%eax, 8(%3)\n"
21039 " movnti %%edx, 12(%3)\n"
21040 - "4: movl 16(%4), %%eax\n"
21041 - "41: movl 20(%4), %%edx\n"
21042 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21043 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21044 " movnti %%eax, 16(%3)\n"
21045 " movnti %%edx, 20(%3)\n"
21046 - "10: movl 24(%4), %%eax\n"
21047 - "51: movl 28(%4), %%edx\n"
21048 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21049 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21050 " movnti %%eax, 24(%3)\n"
21051 " movnti %%edx, 28(%3)\n"
21052 - "11: movl 32(%4), %%eax\n"
21053 - "61: movl 36(%4), %%edx\n"
21054 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21055 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21056 " movnti %%eax, 32(%3)\n"
21057 " movnti %%edx, 36(%3)\n"
21058 - "12: movl 40(%4), %%eax\n"
21059 - "71: movl 44(%4), %%edx\n"
21060 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21061 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21062 " movnti %%eax, 40(%3)\n"
21063 " movnti %%edx, 44(%3)\n"
21064 - "13: movl 48(%4), %%eax\n"
21065 - "81: movl 52(%4), %%edx\n"
21066 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21067 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21068 " movnti %%eax, 48(%3)\n"
21069 " movnti %%edx, 52(%3)\n"
21070 - "14: movl 56(%4), %%eax\n"
21071 - "91: movl 60(%4), %%edx\n"
21072 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21073 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21074 " movnti %%eax, 56(%3)\n"
21075 " movnti %%edx, 60(%3)\n"
21076 " addl $-64, %0\n"
21077 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21078 " shrl $2, %0\n"
21079 " andl $3, %%eax\n"
21080 " cld\n"
21081 - "6: rep; movsl\n"
21082 + "6: rep; "__copyuser_seg" movsl\n"
21083 " movl %%eax,%0\n"
21084 - "7: rep; movsb\n"
21085 + "7: rep; "__copyuser_seg" movsb\n"
21086 "8:\n"
21087 ".section .fixup,\"ax\"\n"
21088 "9: lea 0(%%eax,%0,4),%0\n"
21089 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21090
21091 __asm__ __volatile__(
21092 " .align 2,0x90\n"
21093 - "0: movl 32(%4), %%eax\n"
21094 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21095 " cmpl $67, %0\n"
21096 " jbe 2f\n"
21097 - "1: movl 64(%4), %%eax\n"
21098 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21099 " .align 2,0x90\n"
21100 - "2: movl 0(%4), %%eax\n"
21101 - "21: movl 4(%4), %%edx\n"
21102 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21103 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21104 " movnti %%eax, 0(%3)\n"
21105 " movnti %%edx, 4(%3)\n"
21106 - "3: movl 8(%4), %%eax\n"
21107 - "31: movl 12(%4),%%edx\n"
21108 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21109 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21110 " movnti %%eax, 8(%3)\n"
21111 " movnti %%edx, 12(%3)\n"
21112 - "4: movl 16(%4), %%eax\n"
21113 - "41: movl 20(%4), %%edx\n"
21114 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21115 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21116 " movnti %%eax, 16(%3)\n"
21117 " movnti %%edx, 20(%3)\n"
21118 - "10: movl 24(%4), %%eax\n"
21119 - "51: movl 28(%4), %%edx\n"
21120 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21121 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21122 " movnti %%eax, 24(%3)\n"
21123 " movnti %%edx, 28(%3)\n"
21124 - "11: movl 32(%4), %%eax\n"
21125 - "61: movl 36(%4), %%edx\n"
21126 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21127 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21128 " movnti %%eax, 32(%3)\n"
21129 " movnti %%edx, 36(%3)\n"
21130 - "12: movl 40(%4), %%eax\n"
21131 - "71: movl 44(%4), %%edx\n"
21132 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21133 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21134 " movnti %%eax, 40(%3)\n"
21135 " movnti %%edx, 44(%3)\n"
21136 - "13: movl 48(%4), %%eax\n"
21137 - "81: movl 52(%4), %%edx\n"
21138 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21139 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21140 " movnti %%eax, 48(%3)\n"
21141 " movnti %%edx, 52(%3)\n"
21142 - "14: movl 56(%4), %%eax\n"
21143 - "91: movl 60(%4), %%edx\n"
21144 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21145 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21146 " movnti %%eax, 56(%3)\n"
21147 " movnti %%edx, 60(%3)\n"
21148 " addl $-64, %0\n"
21149 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21150 " shrl $2, %0\n"
21151 " andl $3, %%eax\n"
21152 " cld\n"
21153 - "6: rep; movsl\n"
21154 + "6: rep; "__copyuser_seg" movsl\n"
21155 " movl %%eax,%0\n"
21156 - "7: rep; movsb\n"
21157 + "7: rep; "__copyuser_seg" movsb\n"
21158 "8:\n"
21159 ".section .fixup,\"ax\"\n"
21160 "9: lea 0(%%eax,%0,4),%0\n"
21161 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21162 */
21163 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21164 unsigned long size);
21165 -unsigned long __copy_user_intel(void __user *to, const void *from,
21166 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21167 + unsigned long size);
21168 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21169 unsigned long size);
21170 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21171 const void __user *from, unsigned long size);
21172 #endif /* CONFIG_X86_INTEL_USERCOPY */
21173
21174 /* Generic arbitrary sized copy. */
21175 -#define __copy_user(to, from, size) \
21176 +#define __copy_user(to, from, size, prefix, set, restore) \
21177 do { \
21178 int __d0, __d1, __d2; \
21179 __asm__ __volatile__( \
21180 + set \
21181 " cmp $7,%0\n" \
21182 " jbe 1f\n" \
21183 " movl %1,%0\n" \
21184 " negl %0\n" \
21185 " andl $7,%0\n" \
21186 " subl %0,%3\n" \
21187 - "4: rep; movsb\n" \
21188 + "4: rep; "prefix"movsb\n" \
21189 " movl %3,%0\n" \
21190 " shrl $2,%0\n" \
21191 " andl $3,%3\n" \
21192 " .align 2,0x90\n" \
21193 - "0: rep; movsl\n" \
21194 + "0: rep; "prefix"movsl\n" \
21195 " movl %3,%0\n" \
21196 - "1: rep; movsb\n" \
21197 + "1: rep; "prefix"movsb\n" \
21198 "2:\n" \
21199 + restore \
21200 ".section .fixup,\"ax\"\n" \
21201 "5: addl %3,%0\n" \
21202 " jmp 2b\n" \
21203 @@ -682,14 +799,14 @@ do { \
21204 " negl %0\n" \
21205 " andl $7,%0\n" \
21206 " subl %0,%3\n" \
21207 - "4: rep; movsb\n" \
21208 + "4: rep; "__copyuser_seg"movsb\n" \
21209 " movl %3,%0\n" \
21210 " shrl $2,%0\n" \
21211 " andl $3,%3\n" \
21212 " .align 2,0x90\n" \
21213 - "0: rep; movsl\n" \
21214 + "0: rep; "__copyuser_seg"movsl\n" \
21215 " movl %3,%0\n" \
21216 - "1: rep; movsb\n" \
21217 + "1: rep; "__copyuser_seg"movsb\n" \
21218 "2:\n" \
21219 ".section .fixup,\"ax\"\n" \
21220 "5: addl %3,%0\n" \
21221 @@ -775,9 +892,9 @@ survive:
21222 }
21223 #endif
21224 if (movsl_is_ok(to, from, n))
21225 - __copy_user(to, from, n);
21226 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21227 else
21228 - n = __copy_user_intel(to, from, n);
21229 + n = __generic_copy_to_user_intel(to, from, n);
21230 return n;
21231 }
21232 EXPORT_SYMBOL(__copy_to_user_ll);
21233 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21234 unsigned long n)
21235 {
21236 if (movsl_is_ok(to, from, n))
21237 - __copy_user(to, from, n);
21238 + __copy_user(to, from, n, __copyuser_seg, "", "");
21239 else
21240 - n = __copy_user_intel((void __user *)to,
21241 - (const void *)from, n);
21242 + n = __generic_copy_from_user_intel(to, from, n);
21243 return n;
21244 }
21245 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21246 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21247 if (n > 64 && cpu_has_xmm2)
21248 n = __copy_user_intel_nocache(to, from, n);
21249 else
21250 - __copy_user(to, from, n);
21251 + __copy_user(to, from, n, __copyuser_seg, "", "");
21252 #else
21253 - __copy_user(to, from, n);
21254 + __copy_user(to, from, n, __copyuser_seg, "", "");
21255 #endif
21256 return n;
21257 }
21258 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21259
21260 -/**
21261 - * copy_to_user: - Copy a block of data into user space.
21262 - * @to: Destination address, in user space.
21263 - * @from: Source address, in kernel space.
21264 - * @n: Number of bytes to copy.
21265 - *
21266 - * Context: User context only. This function may sleep.
21267 - *
21268 - * Copy data from kernel space to user space.
21269 - *
21270 - * Returns number of bytes that could not be copied.
21271 - * On success, this will be zero.
21272 - */
21273 -unsigned long
21274 -copy_to_user(void __user *to, const void *from, unsigned long n)
21275 -{
21276 - if (access_ok(VERIFY_WRITE, to, n))
21277 - n = __copy_to_user(to, from, n);
21278 - return n;
21279 -}
21280 -EXPORT_SYMBOL(copy_to_user);
21281 -
21282 -/**
21283 - * copy_from_user: - Copy a block of data from user space.
21284 - * @to: Destination address, in kernel space.
21285 - * @from: Source address, in user space.
21286 - * @n: Number of bytes to copy.
21287 - *
21288 - * Context: User context only. This function may sleep.
21289 - *
21290 - * Copy data from user space to kernel space.
21291 - *
21292 - * Returns number of bytes that could not be copied.
21293 - * On success, this will be zero.
21294 - *
21295 - * If some data could not be copied, this function will pad the copied
21296 - * data to the requested size using zero bytes.
21297 - */
21298 -unsigned long
21299 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21300 -{
21301 - if (access_ok(VERIFY_READ, from, n))
21302 - n = __copy_from_user(to, from, n);
21303 - else
21304 - memset(to, 0, n);
21305 - return n;
21306 -}
21307 -EXPORT_SYMBOL(_copy_from_user);
21308 -
21309 void copy_from_user_overflow(void)
21310 {
21311 WARN(1, "Buffer overflow detected!\n");
21312 }
21313 EXPORT_SYMBOL(copy_from_user_overflow);
21314 +
21315 +void copy_to_user_overflow(void)
21316 +{
21317 + WARN(1, "Buffer overflow detected!\n");
21318 +}
21319 +EXPORT_SYMBOL(copy_to_user_overflow);
21320 +
21321 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21322 +void __set_fs(mm_segment_t x)
21323 +{
21324 + switch (x.seg) {
21325 + case 0:
21326 + loadsegment(gs, 0);
21327 + break;
21328 + case TASK_SIZE_MAX:
21329 + loadsegment(gs, __USER_DS);
21330 + break;
21331 + case -1UL:
21332 + loadsegment(gs, __KERNEL_DS);
21333 + break;
21334 + default:
21335 + BUG();
21336 + }
21337 + return;
21338 +}
21339 +EXPORT_SYMBOL(__set_fs);
21340 +
21341 +void set_fs(mm_segment_t x)
21342 +{
21343 + current_thread_info()->addr_limit = x;
21344 + __set_fs(x);
21345 +}
21346 +EXPORT_SYMBOL(set_fs);
21347 +#endif
21348 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21349 index b7c2849..8633ad8 100644
21350 --- a/arch/x86/lib/usercopy_64.c
21351 +++ b/arch/x86/lib/usercopy_64.c
21352 @@ -42,6 +42,12 @@ long
21353 __strncpy_from_user(char *dst, const char __user *src, long count)
21354 {
21355 long res;
21356 +
21357 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21358 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21359 + src += PAX_USER_SHADOW_BASE;
21360 +#endif
21361 +
21362 __do_strncpy_from_user(dst, src, count, res);
21363 return res;
21364 }
21365 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21366 {
21367 long __d0;
21368 might_fault();
21369 +
21370 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21371 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21372 + addr += PAX_USER_SHADOW_BASE;
21373 +#endif
21374 +
21375 /* no memory constraint because it doesn't change any memory gcc knows
21376 about */
21377 asm volatile(
21378 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21379 }
21380 EXPORT_SYMBOL(strlen_user);
21381
21382 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21383 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21384 {
21385 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21386 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21387 - }
21388 - return len;
21389 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21390 +
21391 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21392 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21393 + to += PAX_USER_SHADOW_BASE;
21394 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21395 + from += PAX_USER_SHADOW_BASE;
21396 +#endif
21397 +
21398 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21399 + }
21400 + return len;
21401 }
21402 EXPORT_SYMBOL(copy_in_user);
21403
21404 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21405 * it is not necessary to optimize tail handling.
21406 */
21407 unsigned long
21408 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21409 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21410 {
21411 char c;
21412 unsigned zero_len;
21413 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21414 index d0474ad..36e9257 100644
21415 --- a/arch/x86/mm/extable.c
21416 +++ b/arch/x86/mm/extable.c
21417 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21418 const struct exception_table_entry *fixup;
21419
21420 #ifdef CONFIG_PNPBIOS
21421 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21422 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21423 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21424 extern u32 pnp_bios_is_utter_crap;
21425 pnp_bios_is_utter_crap = 1;
21426 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21427 index 5db0490..13bd09c 100644
21428 --- a/arch/x86/mm/fault.c
21429 +++ b/arch/x86/mm/fault.c
21430 @@ -13,11 +13,18 @@
21431 #include <linux/perf_event.h> /* perf_sw_event */
21432 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21433 #include <linux/prefetch.h> /* prefetchw */
21434 +#include <linux/unistd.h>
21435 +#include <linux/compiler.h>
21436
21437 #include <asm/traps.h> /* dotraplinkage, ... */
21438 #include <asm/pgalloc.h> /* pgd_*(), ... */
21439 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21440 #include <asm/fixmap.h> /* VSYSCALL_START */
21441 +#include <asm/tlbflush.h>
21442 +
21443 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21444 +#include <asm/stacktrace.h>
21445 +#endif
21446
21447 /*
21448 * Page fault error code bits:
21449 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21450 int ret = 0;
21451
21452 /* kprobe_running() needs smp_processor_id() */
21453 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21454 + if (kprobes_built_in() && !user_mode(regs)) {
21455 preempt_disable();
21456 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21457 ret = 1;
21458 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21459 return !instr_lo || (instr_lo>>1) == 1;
21460 case 0x00:
21461 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21462 - if (probe_kernel_address(instr, opcode))
21463 + if (user_mode(regs)) {
21464 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21465 + return 0;
21466 + } else if (probe_kernel_address(instr, opcode))
21467 return 0;
21468
21469 *prefetch = (instr_lo == 0xF) &&
21470 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21471 while (instr < max_instr) {
21472 unsigned char opcode;
21473
21474 - if (probe_kernel_address(instr, opcode))
21475 + if (user_mode(regs)) {
21476 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21477 + break;
21478 + } else if (probe_kernel_address(instr, opcode))
21479 break;
21480
21481 instr++;
21482 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21483 force_sig_info(si_signo, &info, tsk);
21484 }
21485
21486 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21487 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21488 +#endif
21489 +
21490 +#ifdef CONFIG_PAX_EMUTRAMP
21491 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21492 +#endif
21493 +
21494 +#ifdef CONFIG_PAX_PAGEEXEC
21495 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21496 +{
21497 + pgd_t *pgd;
21498 + pud_t *pud;
21499 + pmd_t *pmd;
21500 +
21501 + pgd = pgd_offset(mm, address);
21502 + if (!pgd_present(*pgd))
21503 + return NULL;
21504 + pud = pud_offset(pgd, address);
21505 + if (!pud_present(*pud))
21506 + return NULL;
21507 + pmd = pmd_offset(pud, address);
21508 + if (!pmd_present(*pmd))
21509 + return NULL;
21510 + return pmd;
21511 +}
21512 +#endif
21513 +
21514 DEFINE_SPINLOCK(pgd_lock);
21515 LIST_HEAD(pgd_list);
21516
21517 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21518 for (address = VMALLOC_START & PMD_MASK;
21519 address >= TASK_SIZE && address < FIXADDR_TOP;
21520 address += PMD_SIZE) {
21521 +
21522 +#ifdef CONFIG_PAX_PER_CPU_PGD
21523 + unsigned long cpu;
21524 +#else
21525 struct page *page;
21526 +#endif
21527
21528 spin_lock(&pgd_lock);
21529 +
21530 +#ifdef CONFIG_PAX_PER_CPU_PGD
21531 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21532 + pgd_t *pgd = get_cpu_pgd(cpu);
21533 + pmd_t *ret;
21534 +#else
21535 list_for_each_entry(page, &pgd_list, lru) {
21536 + pgd_t *pgd = page_address(page);
21537 spinlock_t *pgt_lock;
21538 pmd_t *ret;
21539
21540 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21541 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21542
21543 spin_lock(pgt_lock);
21544 - ret = vmalloc_sync_one(page_address(page), address);
21545 +#endif
21546 +
21547 + ret = vmalloc_sync_one(pgd, address);
21548 +
21549 +#ifndef CONFIG_PAX_PER_CPU_PGD
21550 spin_unlock(pgt_lock);
21551 +#endif
21552
21553 if (!ret)
21554 break;
21555 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21556 * an interrupt in the middle of a task switch..
21557 */
21558 pgd_paddr = read_cr3();
21559 +
21560 +#ifdef CONFIG_PAX_PER_CPU_PGD
21561 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21562 +#endif
21563 +
21564 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21565 if (!pmd_k)
21566 return -1;
21567 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21568 * happen within a race in page table update. In the later
21569 * case just flush:
21570 */
21571 +
21572 +#ifdef CONFIG_PAX_PER_CPU_PGD
21573 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21574 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21575 +#else
21576 pgd = pgd_offset(current->active_mm, address);
21577 +#endif
21578 +
21579 pgd_ref = pgd_offset_k(address);
21580 if (pgd_none(*pgd_ref))
21581 return -1;
21582 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21583 static int is_errata100(struct pt_regs *regs, unsigned long address)
21584 {
21585 #ifdef CONFIG_X86_64
21586 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21587 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21588 return 1;
21589 #endif
21590 return 0;
21591 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21592 }
21593
21594 static const char nx_warning[] = KERN_CRIT
21595 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21596 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21597
21598 static void
21599 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21600 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21601 if (!oops_may_print())
21602 return;
21603
21604 - if (error_code & PF_INSTR) {
21605 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21606 unsigned int level;
21607
21608 pte_t *pte = lookup_address(address, &level);
21609
21610 if (pte && pte_present(*pte) && !pte_exec(*pte))
21611 - printk(nx_warning, current_uid());
21612 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21613 }
21614
21615 +#ifdef CONFIG_PAX_KERNEXEC
21616 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21617 + if (current->signal->curr_ip)
21618 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21619 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21620 + else
21621 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21622 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21623 + }
21624 +#endif
21625 +
21626 printk(KERN_ALERT "BUG: unable to handle kernel ");
21627 if (address < PAGE_SIZE)
21628 printk(KERN_CONT "NULL pointer dereference");
21629 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21630 }
21631 #endif
21632
21633 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21634 + if (pax_is_fetch_fault(regs, error_code, address)) {
21635 +
21636 +#ifdef CONFIG_PAX_EMUTRAMP
21637 + switch (pax_handle_fetch_fault(regs)) {
21638 + case 2:
21639 + return;
21640 + }
21641 +#endif
21642 +
21643 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21644 + do_group_exit(SIGKILL);
21645 + }
21646 +#endif
21647 +
21648 if (unlikely(show_unhandled_signals))
21649 show_signal_msg(regs, error_code, address, tsk);
21650
21651 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21652 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21653 printk(KERN_ERR
21654 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21655 - tsk->comm, tsk->pid, address);
21656 + tsk->comm, task_pid_nr(tsk), address);
21657 code = BUS_MCEERR_AR;
21658 }
21659 #endif
21660 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21661 return 1;
21662 }
21663
21664 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21665 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21666 +{
21667 + pte_t *pte;
21668 + pmd_t *pmd;
21669 + spinlock_t *ptl;
21670 + unsigned char pte_mask;
21671 +
21672 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21673 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21674 + return 0;
21675 +
21676 + /* PaX: it's our fault, let's handle it if we can */
21677 +
21678 + /* PaX: take a look at read faults before acquiring any locks */
21679 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21680 + /* instruction fetch attempt from a protected page in user mode */
21681 + up_read(&mm->mmap_sem);
21682 +
21683 +#ifdef CONFIG_PAX_EMUTRAMP
21684 + switch (pax_handle_fetch_fault(regs)) {
21685 + case 2:
21686 + return 1;
21687 + }
21688 +#endif
21689 +
21690 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21691 + do_group_exit(SIGKILL);
21692 + }
21693 +
21694 + pmd = pax_get_pmd(mm, address);
21695 + if (unlikely(!pmd))
21696 + return 0;
21697 +
21698 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21699 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21700 + pte_unmap_unlock(pte, ptl);
21701 + return 0;
21702 + }
21703 +
21704 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21705 + /* write attempt to a protected page in user mode */
21706 + pte_unmap_unlock(pte, ptl);
21707 + return 0;
21708 + }
21709 +
21710 +#ifdef CONFIG_SMP
21711 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21712 +#else
21713 + if (likely(address > get_limit(regs->cs)))
21714 +#endif
21715 + {
21716 + set_pte(pte, pte_mkread(*pte));
21717 + __flush_tlb_one(address);
21718 + pte_unmap_unlock(pte, ptl);
21719 + up_read(&mm->mmap_sem);
21720 + return 1;
21721 + }
21722 +
21723 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21724 +
21725 + /*
21726 + * PaX: fill DTLB with user rights and retry
21727 + */
21728 + __asm__ __volatile__ (
21729 + "orb %2,(%1)\n"
21730 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21731 +/*
21732 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21733 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21734 + * page fault when examined during a TLB load attempt. this is true not only
21735 + * for PTEs holding a non-present entry but also present entries that will
21736 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21737 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21738 + * for our target pages since their PTEs are simply not in the TLBs at all.
21739 +
21740 + * the best thing in omitting it is that we gain around 15-20% speed in the
21741 + * fast path of the page fault handler and can get rid of tracing since we
21742 + * can no longer flush unintended entries.
21743 + */
21744 + "invlpg (%0)\n"
21745 +#endif
21746 + __copyuser_seg"testb $0,(%0)\n"
21747 + "xorb %3,(%1)\n"
21748 + :
21749 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21750 + : "memory", "cc");
21751 + pte_unmap_unlock(pte, ptl);
21752 + up_read(&mm->mmap_sem);
21753 + return 1;
21754 +}
21755 +#endif
21756 +
21757 /*
21758 * Handle a spurious fault caused by a stale TLB entry.
21759 *
21760 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21761 static inline int
21762 access_error(unsigned long error_code, struct vm_area_struct *vma)
21763 {
21764 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21765 + return 1;
21766 +
21767 if (error_code & PF_WRITE) {
21768 /* write, present and write, not present: */
21769 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21770 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21771 {
21772 struct vm_area_struct *vma;
21773 struct task_struct *tsk;
21774 - unsigned long address;
21775 struct mm_struct *mm;
21776 int fault;
21777 int write = error_code & PF_WRITE;
21778 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21779 (write ? FAULT_FLAG_WRITE : 0);
21780
21781 - tsk = current;
21782 - mm = tsk->mm;
21783 -
21784 /* Get the faulting address: */
21785 - address = read_cr2();
21786 + unsigned long address = read_cr2();
21787 +
21788 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21789 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21790 + if (!search_exception_tables(regs->ip)) {
21791 + bad_area_nosemaphore(regs, error_code, address);
21792 + return;
21793 + }
21794 + if (address < PAX_USER_SHADOW_BASE) {
21795 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21796 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21797 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21798 + } else
21799 + address -= PAX_USER_SHADOW_BASE;
21800 + }
21801 +#endif
21802 +
21803 + tsk = current;
21804 + mm = tsk->mm;
21805
21806 /*
21807 * Detect and handle instructions that would cause a page fault for
21808 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21809 * User-mode registers count as a user access even for any
21810 * potential system fault or CPU buglet:
21811 */
21812 - if (user_mode_vm(regs)) {
21813 + if (user_mode(regs)) {
21814 local_irq_enable();
21815 error_code |= PF_USER;
21816 } else {
21817 @@ -1122,6 +1328,11 @@ retry:
21818 might_sleep();
21819 }
21820
21821 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21822 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21823 + return;
21824 +#endif
21825 +
21826 vma = find_vma(mm, address);
21827 if (unlikely(!vma)) {
21828 bad_area(regs, error_code, address);
21829 @@ -1133,18 +1344,24 @@ retry:
21830 bad_area(regs, error_code, address);
21831 return;
21832 }
21833 - if (error_code & PF_USER) {
21834 - /*
21835 - * Accessing the stack below %sp is always a bug.
21836 - * The large cushion allows instructions like enter
21837 - * and pusha to work. ("enter $65535, $31" pushes
21838 - * 32 pointers and then decrements %sp by 65535.)
21839 - */
21840 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21841 - bad_area(regs, error_code, address);
21842 - return;
21843 - }
21844 + /*
21845 + * Accessing the stack below %sp is always a bug.
21846 + * The large cushion allows instructions like enter
21847 + * and pusha to work. ("enter $65535, $31" pushes
21848 + * 32 pointers and then decrements %sp by 65535.)
21849 + */
21850 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21851 + bad_area(regs, error_code, address);
21852 + return;
21853 }
21854 +
21855 +#ifdef CONFIG_PAX_SEGMEXEC
21856 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21857 + bad_area(regs, error_code, address);
21858 + return;
21859 + }
21860 +#endif
21861 +
21862 if (unlikely(expand_stack(vma, address))) {
21863 bad_area(regs, error_code, address);
21864 return;
21865 @@ -1199,3 +1416,292 @@ good_area:
21866
21867 up_read(&mm->mmap_sem);
21868 }
21869 +
21870 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21871 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21872 +{
21873 + struct mm_struct *mm = current->mm;
21874 + unsigned long ip = regs->ip;
21875 +
21876 + if (v8086_mode(regs))
21877 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21878 +
21879 +#ifdef CONFIG_PAX_PAGEEXEC
21880 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21881 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21882 + return true;
21883 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21884 + return true;
21885 + return false;
21886 + }
21887 +#endif
21888 +
21889 +#ifdef CONFIG_PAX_SEGMEXEC
21890 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21891 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21892 + return true;
21893 + return false;
21894 + }
21895 +#endif
21896 +
21897 + return false;
21898 +}
21899 +#endif
21900 +
21901 +#ifdef CONFIG_PAX_EMUTRAMP
21902 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21903 +{
21904 + int err;
21905 +
21906 + do { /* PaX: libffi trampoline emulation */
21907 + unsigned char mov, jmp;
21908 + unsigned int addr1, addr2;
21909 +
21910 +#ifdef CONFIG_X86_64
21911 + if ((regs->ip + 9) >> 32)
21912 + break;
21913 +#endif
21914 +
21915 + err = get_user(mov, (unsigned char __user *)regs->ip);
21916 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21917 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21918 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21919 +
21920 + if (err)
21921 + break;
21922 +
21923 + if (mov == 0xB8 && jmp == 0xE9) {
21924 + regs->ax = addr1;
21925 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21926 + return 2;
21927 + }
21928 + } while (0);
21929 +
21930 + do { /* PaX: gcc trampoline emulation #1 */
21931 + unsigned char mov1, mov2;
21932 + unsigned short jmp;
21933 + unsigned int addr1, addr2;
21934 +
21935 +#ifdef CONFIG_X86_64
21936 + if ((regs->ip + 11) >> 32)
21937 + break;
21938 +#endif
21939 +
21940 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21941 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21942 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21943 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21944 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21945 +
21946 + if (err)
21947 + break;
21948 +
21949 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21950 + regs->cx = addr1;
21951 + regs->ax = addr2;
21952 + regs->ip = addr2;
21953 + return 2;
21954 + }
21955 + } while (0);
21956 +
21957 + do { /* PaX: gcc trampoline emulation #2 */
21958 + unsigned char mov, jmp;
21959 + unsigned int addr1, addr2;
21960 +
21961 +#ifdef CONFIG_X86_64
21962 + if ((regs->ip + 9) >> 32)
21963 + break;
21964 +#endif
21965 +
21966 + err = get_user(mov, (unsigned char __user *)regs->ip);
21967 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21968 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21969 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21970 +
21971 + if (err)
21972 + break;
21973 +
21974 + if (mov == 0xB9 && jmp == 0xE9) {
21975 + regs->cx = addr1;
21976 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21977 + return 2;
21978 + }
21979 + } while (0);
21980 +
21981 + return 1; /* PaX in action */
21982 +}
21983 +
21984 +#ifdef CONFIG_X86_64
21985 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21986 +{
21987 + int err;
21988 +
21989 + do { /* PaX: libffi trampoline emulation */
21990 + unsigned short mov1, mov2, jmp1;
21991 + unsigned char stcclc, jmp2;
21992 + unsigned long addr1, addr2;
21993 +
21994 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21995 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21996 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21997 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21998 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
21999 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22000 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22001 +
22002 + if (err)
22003 + break;
22004 +
22005 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22006 + regs->r11 = addr1;
22007 + regs->r10 = addr2;
22008 + if (stcclc == 0xF8)
22009 + regs->flags &= ~X86_EFLAGS_CF;
22010 + else
22011 + regs->flags |= X86_EFLAGS_CF;
22012 + regs->ip = addr1;
22013 + return 2;
22014 + }
22015 + } while (0);
22016 +
22017 + do { /* PaX: gcc trampoline emulation #1 */
22018 + unsigned short mov1, mov2, jmp1;
22019 + unsigned char jmp2;
22020 + unsigned int addr1;
22021 + unsigned long addr2;
22022 +
22023 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22024 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22025 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22026 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22027 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22028 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22029 +
22030 + if (err)
22031 + break;
22032 +
22033 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22034 + regs->r11 = addr1;
22035 + regs->r10 = addr2;
22036 + regs->ip = addr1;
22037 + return 2;
22038 + }
22039 + } while (0);
22040 +
22041 + do { /* PaX: gcc trampoline emulation #2 */
22042 + unsigned short mov1, mov2, jmp1;
22043 + unsigned char jmp2;
22044 + unsigned long addr1, addr2;
22045 +
22046 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22047 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22048 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22049 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22050 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22051 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22052 +
22053 + if (err)
22054 + break;
22055 +
22056 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22057 + regs->r11 = addr1;
22058 + regs->r10 = addr2;
22059 + regs->ip = addr1;
22060 + return 2;
22061 + }
22062 + } while (0);
22063 +
22064 + return 1; /* PaX in action */
22065 +}
22066 +#endif
22067 +
22068 +/*
22069 + * PaX: decide what to do with offenders (regs->ip = fault address)
22070 + *
22071 + * returns 1 when task should be killed
22072 + * 2 when gcc trampoline was detected
22073 + */
22074 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22075 +{
22076 + if (v8086_mode(regs))
22077 + return 1;
22078 +
22079 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22080 + return 1;
22081 +
22082 +#ifdef CONFIG_X86_32
22083 + return pax_handle_fetch_fault_32(regs);
22084 +#else
22085 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22086 + return pax_handle_fetch_fault_32(regs);
22087 + else
22088 + return pax_handle_fetch_fault_64(regs);
22089 +#endif
22090 +}
22091 +#endif
22092 +
22093 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22094 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22095 +{
22096 + long i;
22097 +
22098 + printk(KERN_ERR "PAX: bytes at PC: ");
22099 + for (i = 0; i < 20; i++) {
22100 + unsigned char c;
22101 + if (get_user(c, (unsigned char __force_user *)pc+i))
22102 + printk(KERN_CONT "?? ");
22103 + else
22104 + printk(KERN_CONT "%02x ", c);
22105 + }
22106 + printk("\n");
22107 +
22108 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22109 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22110 + unsigned long c;
22111 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22112 +#ifdef CONFIG_X86_32
22113 + printk(KERN_CONT "???????? ");
22114 +#else
22115 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22116 + printk(KERN_CONT "???????? ???????? ");
22117 + else
22118 + printk(KERN_CONT "???????????????? ");
22119 +#endif
22120 + } else {
22121 +#ifdef CONFIG_X86_64
22122 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22123 + printk(KERN_CONT "%08x ", (unsigned int)c);
22124 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22125 + } else
22126 +#endif
22127 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22128 + }
22129 + }
22130 + printk("\n");
22131 +}
22132 +#endif
22133 +
22134 +/**
22135 + * probe_kernel_write(): safely attempt to write to a location
22136 + * @dst: address to write to
22137 + * @src: pointer to the data that shall be written
22138 + * @size: size of the data chunk
22139 + *
22140 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22141 + * happens, handle that and return -EFAULT.
22142 + */
22143 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22144 +{
22145 + long ret;
22146 + mm_segment_t old_fs = get_fs();
22147 +
22148 + set_fs(KERNEL_DS);
22149 + pagefault_disable();
22150 + pax_open_kernel();
22151 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22152 + pax_close_kernel();
22153 + pagefault_enable();
22154 + set_fs(old_fs);
22155 +
22156 + return ret ? -EFAULT : 0;
22157 +}
22158 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22159 index dd74e46..7d26398 100644
22160 --- a/arch/x86/mm/gup.c
22161 +++ b/arch/x86/mm/gup.c
22162 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22163 addr = start;
22164 len = (unsigned long) nr_pages << PAGE_SHIFT;
22165 end = start + len;
22166 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22167 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22168 (void __user *)start, len)))
22169 return 0;
22170
22171 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22172 index f4f29b1..5cac4fb 100644
22173 --- a/arch/x86/mm/highmem_32.c
22174 +++ b/arch/x86/mm/highmem_32.c
22175 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22176 idx = type + KM_TYPE_NR*smp_processor_id();
22177 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22178 BUG_ON(!pte_none(*(kmap_pte-idx)));
22179 +
22180 + pax_open_kernel();
22181 set_pte(kmap_pte-idx, mk_pte(page, prot));
22182 + pax_close_kernel();
22183 +
22184 arch_flush_lazy_mmu_mode();
22185
22186 return (void *)vaddr;
22187 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22188 index f581a18..29efd37 100644
22189 --- a/arch/x86/mm/hugetlbpage.c
22190 +++ b/arch/x86/mm/hugetlbpage.c
22191 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22192 struct hstate *h = hstate_file(file);
22193 struct mm_struct *mm = current->mm;
22194 struct vm_area_struct *vma;
22195 - unsigned long start_addr;
22196 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22197 +
22198 +#ifdef CONFIG_PAX_SEGMEXEC
22199 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22200 + pax_task_size = SEGMEXEC_TASK_SIZE;
22201 +#endif
22202 +
22203 + pax_task_size -= PAGE_SIZE;
22204
22205 if (len > mm->cached_hole_size) {
22206 - start_addr = mm->free_area_cache;
22207 + start_addr = mm->free_area_cache;
22208 } else {
22209 - start_addr = TASK_UNMAPPED_BASE;
22210 - mm->cached_hole_size = 0;
22211 + start_addr = mm->mmap_base;
22212 + mm->cached_hole_size = 0;
22213 }
22214
22215 full_search:
22216 @@ -280,26 +287,27 @@ full_search:
22217
22218 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22219 /* At this point: (!vma || addr < vma->vm_end). */
22220 - if (TASK_SIZE - len < addr) {
22221 + if (pax_task_size - len < addr) {
22222 /*
22223 * Start a new search - just in case we missed
22224 * some holes.
22225 */
22226 - if (start_addr != TASK_UNMAPPED_BASE) {
22227 - start_addr = TASK_UNMAPPED_BASE;
22228 + if (start_addr != mm->mmap_base) {
22229 + start_addr = mm->mmap_base;
22230 mm->cached_hole_size = 0;
22231 goto full_search;
22232 }
22233 return -ENOMEM;
22234 }
22235 - if (!vma || addr + len <= vma->vm_start) {
22236 - mm->free_area_cache = addr + len;
22237 - return addr;
22238 - }
22239 + if (check_heap_stack_gap(vma, addr, len))
22240 + break;
22241 if (addr + mm->cached_hole_size < vma->vm_start)
22242 mm->cached_hole_size = vma->vm_start - addr;
22243 addr = ALIGN(vma->vm_end, huge_page_size(h));
22244 }
22245 +
22246 + mm->free_area_cache = addr + len;
22247 + return addr;
22248 }
22249
22250 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22251 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22252 {
22253 struct hstate *h = hstate_file(file);
22254 struct mm_struct *mm = current->mm;
22255 - struct vm_area_struct *vma, *prev_vma;
22256 - unsigned long base = mm->mmap_base, addr = addr0;
22257 + struct vm_area_struct *vma;
22258 + unsigned long base = mm->mmap_base, addr;
22259 unsigned long largest_hole = mm->cached_hole_size;
22260 - int first_time = 1;
22261
22262 /* don't allow allocations above current base */
22263 if (mm->free_area_cache > base)
22264 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22265 largest_hole = 0;
22266 mm->free_area_cache = base;
22267 }
22268 -try_again:
22269 +
22270 /* make sure it can fit in the remaining address space */
22271 if (mm->free_area_cache < len)
22272 goto fail;
22273
22274 /* either no address requested or can't fit in requested address hole */
22275 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22276 + addr = (mm->free_area_cache - len);
22277 do {
22278 + addr &= huge_page_mask(h);
22279 + vma = find_vma(mm, addr);
22280 /*
22281 * Lookup failure means no vma is above this address,
22282 * i.e. return with success:
22283 - */
22284 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22285 - return addr;
22286 -
22287 - /*
22288 * new region fits between prev_vma->vm_end and
22289 * vma->vm_start, use it:
22290 */
22291 - if (addr + len <= vma->vm_start &&
22292 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22293 + if (check_heap_stack_gap(vma, addr, len)) {
22294 /* remember the address as a hint for next time */
22295 - mm->cached_hole_size = largest_hole;
22296 - return (mm->free_area_cache = addr);
22297 - } else {
22298 - /* pull free_area_cache down to the first hole */
22299 - if (mm->free_area_cache == vma->vm_end) {
22300 - mm->free_area_cache = vma->vm_start;
22301 - mm->cached_hole_size = largest_hole;
22302 - }
22303 + mm->cached_hole_size = largest_hole;
22304 + return (mm->free_area_cache = addr);
22305 + }
22306 + /* pull free_area_cache down to the first hole */
22307 + if (mm->free_area_cache == vma->vm_end) {
22308 + mm->free_area_cache = vma->vm_start;
22309 + mm->cached_hole_size = largest_hole;
22310 }
22311
22312 /* remember the largest hole we saw so far */
22313 if (addr + largest_hole < vma->vm_start)
22314 - largest_hole = vma->vm_start - addr;
22315 + largest_hole = vma->vm_start - addr;
22316
22317 /* try just below the current vma->vm_start */
22318 - addr = (vma->vm_start - len) & huge_page_mask(h);
22319 - } while (len <= vma->vm_start);
22320 + addr = skip_heap_stack_gap(vma, len);
22321 + } while (!IS_ERR_VALUE(addr));
22322
22323 fail:
22324 /*
22325 - * if hint left us with no space for the requested
22326 - * mapping then try again:
22327 - */
22328 - if (first_time) {
22329 - mm->free_area_cache = base;
22330 - largest_hole = 0;
22331 - first_time = 0;
22332 - goto try_again;
22333 - }
22334 - /*
22335 * A failed mmap() very likely causes application failure,
22336 * so fall back to the bottom-up function here. This scenario
22337 * can happen with large stack limits and large mmap()
22338 * allocations.
22339 */
22340 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22341 +
22342 +#ifdef CONFIG_PAX_SEGMEXEC
22343 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22344 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22345 + else
22346 +#endif
22347 +
22348 + mm->mmap_base = TASK_UNMAPPED_BASE;
22349 +
22350 +#ifdef CONFIG_PAX_RANDMMAP
22351 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22352 + mm->mmap_base += mm->delta_mmap;
22353 +#endif
22354 +
22355 + mm->free_area_cache = mm->mmap_base;
22356 mm->cached_hole_size = ~0UL;
22357 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22358 len, pgoff, flags);
22359 @@ -386,6 +392,7 @@ fail:
22360 /*
22361 * Restore the topdown base:
22362 */
22363 + mm->mmap_base = base;
22364 mm->free_area_cache = base;
22365 mm->cached_hole_size = ~0UL;
22366
22367 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22368 struct hstate *h = hstate_file(file);
22369 struct mm_struct *mm = current->mm;
22370 struct vm_area_struct *vma;
22371 + unsigned long pax_task_size = TASK_SIZE;
22372
22373 if (len & ~huge_page_mask(h))
22374 return -EINVAL;
22375 - if (len > TASK_SIZE)
22376 +
22377 +#ifdef CONFIG_PAX_SEGMEXEC
22378 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22379 + pax_task_size = SEGMEXEC_TASK_SIZE;
22380 +#endif
22381 +
22382 + pax_task_size -= PAGE_SIZE;
22383 +
22384 + if (len > pax_task_size)
22385 return -ENOMEM;
22386
22387 if (flags & MAP_FIXED) {
22388 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22389 if (addr) {
22390 addr = ALIGN(addr, huge_page_size(h));
22391 vma = find_vma(mm, addr);
22392 - if (TASK_SIZE - len >= addr &&
22393 - (!vma || addr + len <= vma->vm_start))
22394 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22395 return addr;
22396 }
22397 if (mm->get_unmapped_area == arch_get_unmapped_area)
22398 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22399 index 87488b9..a55509f 100644
22400 --- a/arch/x86/mm/init.c
22401 +++ b/arch/x86/mm/init.c
22402 @@ -15,6 +15,7 @@
22403 #include <asm/tlbflush.h>
22404 #include <asm/tlb.h>
22405 #include <asm/proto.h>
22406 +#include <asm/desc.h>
22407
22408 unsigned long __initdata pgt_buf_start;
22409 unsigned long __meminitdata pgt_buf_end;
22410 @@ -31,7 +32,7 @@ int direct_gbpages
22411 static void __init find_early_table_space(unsigned long end, int use_pse,
22412 int use_gbpages)
22413 {
22414 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22415 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22416 phys_addr_t base;
22417
22418 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22419 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22420 */
22421 int devmem_is_allowed(unsigned long pagenr)
22422 {
22423 +#ifdef CONFIG_GRKERNSEC_KMEM
22424 + /* allow BDA */
22425 + if (!pagenr)
22426 + return 1;
22427 + /* allow EBDA */
22428 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22429 + return 1;
22430 +#else
22431 + if (!pagenr)
22432 + return 1;
22433 +#ifdef CONFIG_VM86
22434 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22435 + return 1;
22436 +#endif
22437 +#endif
22438 +
22439 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22440 + return 1;
22441 +#ifdef CONFIG_GRKERNSEC_KMEM
22442 + /* throw out everything else below 1MB */
22443 if (pagenr <= 256)
22444 - return 1;
22445 + return 0;
22446 +#endif
22447 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22448 return 0;
22449 if (!page_is_ram(pagenr))
22450 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22451
22452 void free_initmem(void)
22453 {
22454 +
22455 +#ifdef CONFIG_PAX_KERNEXEC
22456 +#ifdef CONFIG_X86_32
22457 + /* PaX: limit KERNEL_CS to actual size */
22458 + unsigned long addr, limit;
22459 + struct desc_struct d;
22460 + int cpu;
22461 +
22462 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22463 + limit = (limit - 1UL) >> PAGE_SHIFT;
22464 +
22465 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22466 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22467 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22468 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22469 + }
22470 +
22471 + /* PaX: make KERNEL_CS read-only */
22472 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22473 + if (!paravirt_enabled())
22474 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22475 +/*
22476 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22477 + pgd = pgd_offset_k(addr);
22478 + pud = pud_offset(pgd, addr);
22479 + pmd = pmd_offset(pud, addr);
22480 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22481 + }
22482 +*/
22483 +#ifdef CONFIG_X86_PAE
22484 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22485 +/*
22486 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22487 + pgd = pgd_offset_k(addr);
22488 + pud = pud_offset(pgd, addr);
22489 + pmd = pmd_offset(pud, addr);
22490 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22491 + }
22492 +*/
22493 +#endif
22494 +
22495 +#ifdef CONFIG_MODULES
22496 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22497 +#endif
22498 +
22499 +#else
22500 + pgd_t *pgd;
22501 + pud_t *pud;
22502 + pmd_t *pmd;
22503 + unsigned long addr, end;
22504 +
22505 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22506 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22507 + pgd = pgd_offset_k(addr);
22508 + pud = pud_offset(pgd, addr);
22509 + pmd = pmd_offset(pud, addr);
22510 + if (!pmd_present(*pmd))
22511 + continue;
22512 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22513 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22514 + else
22515 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22516 + }
22517 +
22518 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22519 + end = addr + KERNEL_IMAGE_SIZE;
22520 + for (; addr < end; addr += PMD_SIZE) {
22521 + pgd = pgd_offset_k(addr);
22522 + pud = pud_offset(pgd, addr);
22523 + pmd = pmd_offset(pud, addr);
22524 + if (!pmd_present(*pmd))
22525 + continue;
22526 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22527 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22528 + }
22529 +#endif
22530 +
22531 + flush_tlb_all();
22532 +#endif
22533 +
22534 free_init_pages("unused kernel memory",
22535 (unsigned long)(&__init_begin),
22536 (unsigned long)(&__init_end));
22537 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22538 index 29f7c6d..b46b35b 100644
22539 --- a/arch/x86/mm/init_32.c
22540 +++ b/arch/x86/mm/init_32.c
22541 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22542 }
22543
22544 /*
22545 - * Creates a middle page table and puts a pointer to it in the
22546 - * given global directory entry. This only returns the gd entry
22547 - * in non-PAE compilation mode, since the middle layer is folded.
22548 - */
22549 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22550 -{
22551 - pud_t *pud;
22552 - pmd_t *pmd_table;
22553 -
22554 -#ifdef CONFIG_X86_PAE
22555 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22556 - if (after_bootmem)
22557 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22558 - else
22559 - pmd_table = (pmd_t *)alloc_low_page();
22560 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22561 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22562 - pud = pud_offset(pgd, 0);
22563 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22564 -
22565 - return pmd_table;
22566 - }
22567 -#endif
22568 - pud = pud_offset(pgd, 0);
22569 - pmd_table = pmd_offset(pud, 0);
22570 -
22571 - return pmd_table;
22572 -}
22573 -
22574 -/*
22575 * Create a page table and place a pointer to it in a middle page
22576 * directory entry:
22577 */
22578 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22579 page_table = (pte_t *)alloc_low_page();
22580
22581 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22582 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22583 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22584 +#else
22585 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22586 +#endif
22587 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22588 }
22589
22590 return pte_offset_kernel(pmd, 0);
22591 }
22592
22593 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22594 +{
22595 + pud_t *pud;
22596 + pmd_t *pmd_table;
22597 +
22598 + pud = pud_offset(pgd, 0);
22599 + pmd_table = pmd_offset(pud, 0);
22600 +
22601 + return pmd_table;
22602 +}
22603 +
22604 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22605 {
22606 int pgd_idx = pgd_index(vaddr);
22607 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22608 int pgd_idx, pmd_idx;
22609 unsigned long vaddr;
22610 pgd_t *pgd;
22611 + pud_t *pud;
22612 pmd_t *pmd;
22613 pte_t *pte = NULL;
22614
22615 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22616 pgd = pgd_base + pgd_idx;
22617
22618 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22619 - pmd = one_md_table_init(pgd);
22620 - pmd = pmd + pmd_index(vaddr);
22621 + pud = pud_offset(pgd, vaddr);
22622 + pmd = pmd_offset(pud, vaddr);
22623 +
22624 +#ifdef CONFIG_X86_PAE
22625 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22626 +#endif
22627 +
22628 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22629 pmd++, pmd_idx++) {
22630 pte = page_table_kmap_check(one_page_table_init(pmd),
22631 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22632 }
22633 }
22634
22635 -static inline int is_kernel_text(unsigned long addr)
22636 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22637 {
22638 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22639 - return 1;
22640 - return 0;
22641 + if ((start > ktla_ktva((unsigned long)_etext) ||
22642 + end <= ktla_ktva((unsigned long)_stext)) &&
22643 + (start > ktla_ktva((unsigned long)_einittext) ||
22644 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22645 +
22646 +#ifdef CONFIG_ACPI_SLEEP
22647 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22648 +#endif
22649 +
22650 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22651 + return 0;
22652 + return 1;
22653 }
22654
22655 /*
22656 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22657 unsigned long last_map_addr = end;
22658 unsigned long start_pfn, end_pfn;
22659 pgd_t *pgd_base = swapper_pg_dir;
22660 - int pgd_idx, pmd_idx, pte_ofs;
22661 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22662 unsigned long pfn;
22663 pgd_t *pgd;
22664 + pud_t *pud;
22665 pmd_t *pmd;
22666 pte_t *pte;
22667 unsigned pages_2m, pages_4k;
22668 @@ -281,8 +282,13 @@ repeat:
22669 pfn = start_pfn;
22670 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22671 pgd = pgd_base + pgd_idx;
22672 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22673 - pmd = one_md_table_init(pgd);
22674 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22675 + pud = pud_offset(pgd, 0);
22676 + pmd = pmd_offset(pud, 0);
22677 +
22678 +#ifdef CONFIG_X86_PAE
22679 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22680 +#endif
22681
22682 if (pfn >= end_pfn)
22683 continue;
22684 @@ -294,14 +300,13 @@ repeat:
22685 #endif
22686 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22687 pmd++, pmd_idx++) {
22688 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22689 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22690
22691 /*
22692 * Map with big pages if possible, otherwise
22693 * create normal page tables:
22694 */
22695 if (use_pse) {
22696 - unsigned int addr2;
22697 pgprot_t prot = PAGE_KERNEL_LARGE;
22698 /*
22699 * first pass will use the same initial
22700 @@ -311,11 +316,7 @@ repeat:
22701 __pgprot(PTE_IDENT_ATTR |
22702 _PAGE_PSE);
22703
22704 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22705 - PAGE_OFFSET + PAGE_SIZE-1;
22706 -
22707 - if (is_kernel_text(addr) ||
22708 - is_kernel_text(addr2))
22709 + if (is_kernel_text(address, address + PMD_SIZE))
22710 prot = PAGE_KERNEL_LARGE_EXEC;
22711
22712 pages_2m++;
22713 @@ -332,7 +333,7 @@ repeat:
22714 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22715 pte += pte_ofs;
22716 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22717 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22718 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22719 pgprot_t prot = PAGE_KERNEL;
22720 /*
22721 * first pass will use the same initial
22722 @@ -340,7 +341,7 @@ repeat:
22723 */
22724 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22725
22726 - if (is_kernel_text(addr))
22727 + if (is_kernel_text(address, address + PAGE_SIZE))
22728 prot = PAGE_KERNEL_EXEC;
22729
22730 pages_4k++;
22731 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22732
22733 pud = pud_offset(pgd, va);
22734 pmd = pmd_offset(pud, va);
22735 - if (!pmd_present(*pmd))
22736 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22737 break;
22738
22739 pte = pte_offset_kernel(pmd, va);
22740 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22741
22742 static void __init pagetable_init(void)
22743 {
22744 - pgd_t *pgd_base = swapper_pg_dir;
22745 -
22746 - permanent_kmaps_init(pgd_base);
22747 + permanent_kmaps_init(swapper_pg_dir);
22748 }
22749
22750 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22751 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22752 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22753
22754 /* user-defined highmem size */
22755 @@ -757,6 +756,12 @@ void __init mem_init(void)
22756
22757 pci_iommu_alloc();
22758
22759 +#ifdef CONFIG_PAX_PER_CPU_PGD
22760 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22761 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22762 + KERNEL_PGD_PTRS);
22763 +#endif
22764 +
22765 #ifdef CONFIG_FLATMEM
22766 BUG_ON(!mem_map);
22767 #endif
22768 @@ -774,7 +779,7 @@ void __init mem_init(void)
22769 set_highmem_pages_init();
22770
22771 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22772 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22773 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22774 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22775
22776 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22777 @@ -815,10 +820,10 @@ void __init mem_init(void)
22778 ((unsigned long)&__init_end -
22779 (unsigned long)&__init_begin) >> 10,
22780
22781 - (unsigned long)&_etext, (unsigned long)&_edata,
22782 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22783 + (unsigned long)&_sdata, (unsigned long)&_edata,
22784 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22785
22786 - (unsigned long)&_text, (unsigned long)&_etext,
22787 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22788 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22789
22790 /*
22791 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22792 if (!kernel_set_to_readonly)
22793 return;
22794
22795 + start = ktla_ktva(start);
22796 pr_debug("Set kernel text: %lx - %lx for read write\n",
22797 start, start+size);
22798
22799 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22800 if (!kernel_set_to_readonly)
22801 return;
22802
22803 + start = ktla_ktva(start);
22804 pr_debug("Set kernel text: %lx - %lx for read only\n",
22805 start, start+size);
22806
22807 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22808 unsigned long start = PFN_ALIGN(_text);
22809 unsigned long size = PFN_ALIGN(_etext) - start;
22810
22811 + start = ktla_ktva(start);
22812 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22813 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22814 size >> 10);
22815 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22816 index bbaaa00..16dffad 100644
22817 --- a/arch/x86/mm/init_64.c
22818 +++ b/arch/x86/mm/init_64.c
22819 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22820 * around without checking the pgd every time.
22821 */
22822
22823 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22824 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22825 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22826
22827 int force_personality32;
22828 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22829
22830 for (address = start; address <= end; address += PGDIR_SIZE) {
22831 const pgd_t *pgd_ref = pgd_offset_k(address);
22832 +
22833 +#ifdef CONFIG_PAX_PER_CPU_PGD
22834 + unsigned long cpu;
22835 +#else
22836 struct page *page;
22837 +#endif
22838
22839 if (pgd_none(*pgd_ref))
22840 continue;
22841
22842 spin_lock(&pgd_lock);
22843 +
22844 +#ifdef CONFIG_PAX_PER_CPU_PGD
22845 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22846 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
22847 +#else
22848 list_for_each_entry(page, &pgd_list, lru) {
22849 pgd_t *pgd;
22850 spinlock_t *pgt_lock;
22851 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22852 /* the pgt_lock only for Xen */
22853 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22854 spin_lock(pgt_lock);
22855 +#endif
22856
22857 if (pgd_none(*pgd))
22858 set_pgd(pgd, *pgd_ref);
22859 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22860 BUG_ON(pgd_page_vaddr(*pgd)
22861 != pgd_page_vaddr(*pgd_ref));
22862
22863 +#ifndef CONFIG_PAX_PER_CPU_PGD
22864 spin_unlock(pgt_lock);
22865 +#endif
22866 +
22867 }
22868 spin_unlock(&pgd_lock);
22869 }
22870 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22871 pmd = fill_pmd(pud, vaddr);
22872 pte = fill_pte(pmd, vaddr);
22873
22874 + pax_open_kernel();
22875 set_pte(pte, new_pte);
22876 + pax_close_kernel();
22877
22878 /*
22879 * It's enough to flush this one mapping.
22880 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22881 pgd = pgd_offset_k((unsigned long)__va(phys));
22882 if (pgd_none(*pgd)) {
22883 pud = (pud_t *) spp_getpage();
22884 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22885 - _PAGE_USER));
22886 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22887 }
22888 pud = pud_offset(pgd, (unsigned long)__va(phys));
22889 if (pud_none(*pud)) {
22890 pmd = (pmd_t *) spp_getpage();
22891 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22892 - _PAGE_USER));
22893 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22894 }
22895 pmd = pmd_offset(pud, phys);
22896 BUG_ON(!pmd_none(*pmd));
22897 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22898 if (pfn >= pgt_buf_top)
22899 panic("alloc_low_page: ran out of memory");
22900
22901 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22902 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22903 clear_page(adr);
22904 *phys = pfn * PAGE_SIZE;
22905 return adr;
22906 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22907
22908 phys = __pa(virt);
22909 left = phys & (PAGE_SIZE - 1);
22910 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22911 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22912 adr = (void *)(((unsigned long)adr) | left);
22913
22914 return adr;
22915 @@ -693,6 +707,12 @@ void __init mem_init(void)
22916
22917 pci_iommu_alloc();
22918
22919 +#ifdef CONFIG_PAX_PER_CPU_PGD
22920 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22921 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22922 + KERNEL_PGD_PTRS);
22923 +#endif
22924 +
22925 /* clear_bss() already clear the empty_zero_page */
22926
22927 reservedpages = 0;
22928 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22929 static struct vm_area_struct gate_vma = {
22930 .vm_start = VSYSCALL_START,
22931 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22932 - .vm_page_prot = PAGE_READONLY_EXEC,
22933 - .vm_flags = VM_READ | VM_EXEC
22934 + .vm_page_prot = PAGE_READONLY,
22935 + .vm_flags = VM_READ
22936 };
22937
22938 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22939 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22940
22941 const char *arch_vma_name(struct vm_area_struct *vma)
22942 {
22943 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22944 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22945 return "[vdso]";
22946 if (vma == &gate_vma)
22947 return "[vsyscall]";
22948 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22949 index 7b179b4..6bd1777 100644
22950 --- a/arch/x86/mm/iomap_32.c
22951 +++ b/arch/x86/mm/iomap_32.c
22952 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22953 type = kmap_atomic_idx_push();
22954 idx = type + KM_TYPE_NR * smp_processor_id();
22955 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22956 +
22957 + pax_open_kernel();
22958 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22959 + pax_close_kernel();
22960 +
22961 arch_flush_lazy_mmu_mode();
22962
22963 return (void *)vaddr;
22964 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22965 index be1ef57..55f0160 100644
22966 --- a/arch/x86/mm/ioremap.c
22967 +++ b/arch/x86/mm/ioremap.c
22968 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22969 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22970 int is_ram = page_is_ram(pfn);
22971
22972 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22973 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22974 return NULL;
22975 WARN_ON_ONCE(is_ram);
22976 }
22977 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
22978
22979 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
22980 if (page_is_ram(start >> PAGE_SHIFT))
22981 +#ifdef CONFIG_HIGHMEM
22982 + if ((start >> PAGE_SHIFT) < max_low_pfn)
22983 +#endif
22984 return __va(phys);
22985
22986 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
22987 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
22988 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22989
22990 static __initdata int after_paging_init;
22991 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22992 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22993
22994 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22995 {
22996 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
22997 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22998
22999 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23000 - memset(bm_pte, 0, sizeof(bm_pte));
23001 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
23002 + pmd_populate_user(&init_mm, pmd, bm_pte);
23003
23004 /*
23005 * The boot-ioremap range spans multiple pmds, for which
23006 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23007 index d87dd6d..bf3fa66 100644
23008 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23009 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23010 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23011 * memory (e.g. tracked pages)? For now, we need this to avoid
23012 * invoking kmemcheck for PnP BIOS calls.
23013 */
23014 - if (regs->flags & X86_VM_MASK)
23015 + if (v8086_mode(regs))
23016 return false;
23017 - if (regs->cs != __KERNEL_CS)
23018 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23019 return false;
23020
23021 pte = kmemcheck_pte_lookup(address);
23022 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23023 index 845df68..1d8d29f 100644
23024 --- a/arch/x86/mm/mmap.c
23025 +++ b/arch/x86/mm/mmap.c
23026 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23027 * Leave an at least ~128 MB hole with possible stack randomization.
23028 */
23029 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23030 -#define MAX_GAP (TASK_SIZE/6*5)
23031 +#define MAX_GAP (pax_task_size/6*5)
23032
23033 static int mmap_is_legacy(void)
23034 {
23035 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23036 return rnd << PAGE_SHIFT;
23037 }
23038
23039 -static unsigned long mmap_base(void)
23040 +static unsigned long mmap_base(struct mm_struct *mm)
23041 {
23042 unsigned long gap = rlimit(RLIMIT_STACK);
23043 + unsigned long pax_task_size = TASK_SIZE;
23044 +
23045 +#ifdef CONFIG_PAX_SEGMEXEC
23046 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23047 + pax_task_size = SEGMEXEC_TASK_SIZE;
23048 +#endif
23049
23050 if (gap < MIN_GAP)
23051 gap = MIN_GAP;
23052 else if (gap > MAX_GAP)
23053 gap = MAX_GAP;
23054
23055 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23056 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23057 }
23058
23059 /*
23060 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23061 * does, but not when emulating X86_32
23062 */
23063 -static unsigned long mmap_legacy_base(void)
23064 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23065 {
23066 - if (mmap_is_ia32())
23067 + if (mmap_is_ia32()) {
23068 +
23069 +#ifdef CONFIG_PAX_SEGMEXEC
23070 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23071 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23072 + else
23073 +#endif
23074 +
23075 return TASK_UNMAPPED_BASE;
23076 - else
23077 + } else
23078 return TASK_UNMAPPED_BASE + mmap_rnd();
23079 }
23080
23081 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23082 void arch_pick_mmap_layout(struct mm_struct *mm)
23083 {
23084 if (mmap_is_legacy()) {
23085 - mm->mmap_base = mmap_legacy_base();
23086 + mm->mmap_base = mmap_legacy_base(mm);
23087 +
23088 +#ifdef CONFIG_PAX_RANDMMAP
23089 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23090 + mm->mmap_base += mm->delta_mmap;
23091 +#endif
23092 +
23093 mm->get_unmapped_area = arch_get_unmapped_area;
23094 mm->unmap_area = arch_unmap_area;
23095 } else {
23096 - mm->mmap_base = mmap_base();
23097 + mm->mmap_base = mmap_base(mm);
23098 +
23099 +#ifdef CONFIG_PAX_RANDMMAP
23100 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23101 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23102 +#endif
23103 +
23104 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23105 mm->unmap_area = arch_unmap_area_topdown;
23106 }
23107 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23108 index de54b9b..799051e 100644
23109 --- a/arch/x86/mm/mmio-mod.c
23110 +++ b/arch/x86/mm/mmio-mod.c
23111 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23112 break;
23113 default:
23114 {
23115 - unsigned char *ip = (unsigned char *)instptr;
23116 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23117 my_trace->opcode = MMIO_UNKNOWN_OP;
23118 my_trace->width = 0;
23119 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23120 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23121 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23122 void __iomem *addr)
23123 {
23124 - static atomic_t next_id;
23125 + static atomic_unchecked_t next_id;
23126 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23127 /* These are page-unaligned. */
23128 struct mmiotrace_map map = {
23129 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23130 .private = trace
23131 },
23132 .phys = offset,
23133 - .id = atomic_inc_return(&next_id)
23134 + .id = atomic_inc_return_unchecked(&next_id)
23135 };
23136 map.map_id = trace->id;
23137
23138 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23139 index b008656..773eac2 100644
23140 --- a/arch/x86/mm/pageattr-test.c
23141 +++ b/arch/x86/mm/pageattr-test.c
23142 @@ -36,7 +36,7 @@ enum {
23143
23144 static int pte_testbit(pte_t pte)
23145 {
23146 - return pte_flags(pte) & _PAGE_UNUSED1;
23147 + return pte_flags(pte) & _PAGE_CPA_TEST;
23148 }
23149
23150 struct split_state {
23151 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23152 index f9e5267..6f6e27f 100644
23153 --- a/arch/x86/mm/pageattr.c
23154 +++ b/arch/x86/mm/pageattr.c
23155 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23156 */
23157 #ifdef CONFIG_PCI_BIOS
23158 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23159 - pgprot_val(forbidden) |= _PAGE_NX;
23160 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23161 #endif
23162
23163 /*
23164 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23165 * Does not cover __inittext since that is gone later on. On
23166 * 64bit we do not enforce !NX on the low mapping
23167 */
23168 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23169 - pgprot_val(forbidden) |= _PAGE_NX;
23170 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23171 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23172
23173 +#ifdef CONFIG_DEBUG_RODATA
23174 /*
23175 * The .rodata section needs to be read-only. Using the pfn
23176 * catches all aliases.
23177 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23178 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23179 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23180 pgprot_val(forbidden) |= _PAGE_RW;
23181 +#endif
23182
23183 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23184 /*
23185 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23186 }
23187 #endif
23188
23189 +#ifdef CONFIG_PAX_KERNEXEC
23190 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23191 + pgprot_val(forbidden) |= _PAGE_RW;
23192 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23193 + }
23194 +#endif
23195 +
23196 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23197
23198 return prot;
23199 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23200 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23201 {
23202 /* change init_mm */
23203 + pax_open_kernel();
23204 set_pte_atomic(kpte, pte);
23205 +
23206 #ifdef CONFIG_X86_32
23207 if (!SHARED_KERNEL_PMD) {
23208 +
23209 +#ifdef CONFIG_PAX_PER_CPU_PGD
23210 + unsigned long cpu;
23211 +#else
23212 struct page *page;
23213 +#endif
23214
23215 +#ifdef CONFIG_PAX_PER_CPU_PGD
23216 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23217 + pgd_t *pgd = get_cpu_pgd(cpu);
23218 +#else
23219 list_for_each_entry(page, &pgd_list, lru) {
23220 - pgd_t *pgd;
23221 + pgd_t *pgd = (pgd_t *)page_address(page);
23222 +#endif
23223 +
23224 pud_t *pud;
23225 pmd_t *pmd;
23226
23227 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23228 + pgd += pgd_index(address);
23229 pud = pud_offset(pgd, address);
23230 pmd = pmd_offset(pud, address);
23231 set_pte_atomic((pte_t *)pmd, pte);
23232 }
23233 }
23234 #endif
23235 + pax_close_kernel();
23236 }
23237
23238 static int
23239 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23240 index f6ff57b..481690f 100644
23241 --- a/arch/x86/mm/pat.c
23242 +++ b/arch/x86/mm/pat.c
23243 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23244
23245 if (!entry) {
23246 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23247 - current->comm, current->pid, start, end);
23248 + current->comm, task_pid_nr(current), start, end);
23249 return -EINVAL;
23250 }
23251
23252 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23253 while (cursor < to) {
23254 if (!devmem_is_allowed(pfn)) {
23255 printk(KERN_INFO
23256 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23257 - current->comm, from, to);
23258 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23259 + current->comm, from, to, cursor);
23260 return 0;
23261 }
23262 cursor += PAGE_SIZE;
23263 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23264 printk(KERN_INFO
23265 "%s:%d ioremap_change_attr failed %s "
23266 "for %Lx-%Lx\n",
23267 - current->comm, current->pid,
23268 + current->comm, task_pid_nr(current),
23269 cattr_name(flags),
23270 base, (unsigned long long)(base + size));
23271 return -EINVAL;
23272 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23273 if (want_flags != flags) {
23274 printk(KERN_WARNING
23275 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23276 - current->comm, current->pid,
23277 + current->comm, task_pid_nr(current),
23278 cattr_name(want_flags),
23279 (unsigned long long)paddr,
23280 (unsigned long long)(paddr + size),
23281 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23282 free_memtype(paddr, paddr + size);
23283 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23284 " for %Lx-%Lx, got %s\n",
23285 - current->comm, current->pid,
23286 + current->comm, task_pid_nr(current),
23287 cattr_name(want_flags),
23288 (unsigned long long)paddr,
23289 (unsigned long long)(paddr + size),
23290 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23291 index 9f0614d..92ae64a 100644
23292 --- a/arch/x86/mm/pf_in.c
23293 +++ b/arch/x86/mm/pf_in.c
23294 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23295 int i;
23296 enum reason_type rv = OTHERS;
23297
23298 - p = (unsigned char *)ins_addr;
23299 + p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302
23303 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23304 struct prefix_bits prf;
23305 int i;
23306
23307 - p = (unsigned char *)ins_addr;
23308 + p = (unsigned char *)ktla_ktva(ins_addr);
23309 p += skip_prefix(p, &prf);
23310 p += get_opcode(p, &opcode);
23311
23312 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23313 struct prefix_bits prf;
23314 int i;
23315
23316 - p = (unsigned char *)ins_addr;
23317 + p = (unsigned char *)ktla_ktva(ins_addr);
23318 p += skip_prefix(p, &prf);
23319 p += get_opcode(p, &opcode);
23320
23321 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23322 struct prefix_bits prf;
23323 int i;
23324
23325 - p = (unsigned char *)ins_addr;
23326 + p = (unsigned char *)ktla_ktva(ins_addr);
23327 p += skip_prefix(p, &prf);
23328 p += get_opcode(p, &opcode);
23329 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23330 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23331 struct prefix_bits prf;
23332 int i;
23333
23334 - p = (unsigned char *)ins_addr;
23335 + p = (unsigned char *)ktla_ktva(ins_addr);
23336 p += skip_prefix(p, &prf);
23337 p += get_opcode(p, &opcode);
23338 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23339 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23340 index 8573b83..6372501 100644
23341 --- a/arch/x86/mm/pgtable.c
23342 +++ b/arch/x86/mm/pgtable.c
23343 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23344 list_del(&page->lru);
23345 }
23346
23347 -#define UNSHARED_PTRS_PER_PGD \
23348 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23349 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23350 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23351
23352 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23353 +{
23354 + while (count--)
23355 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23356 +}
23357 +#endif
23358
23359 +#ifdef CONFIG_PAX_PER_CPU_PGD
23360 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23361 +{
23362 + while (count--)
23363 +
23364 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23365 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23366 +#else
23367 + *dst++ = *src++;
23368 +#endif
23369 +
23370 +}
23371 +#endif
23372 +
23373 +#ifdef CONFIG_X86_64
23374 +#define pxd_t pud_t
23375 +#define pyd_t pgd_t
23376 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23377 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23378 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23379 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23380 +#define PYD_SIZE PGDIR_SIZE
23381 +#else
23382 +#define pxd_t pmd_t
23383 +#define pyd_t pud_t
23384 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23385 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23386 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23387 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
23388 +#define PYD_SIZE PUD_SIZE
23389 +#endif
23390 +
23391 +#ifdef CONFIG_PAX_PER_CPU_PGD
23392 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23393 +static inline void pgd_dtor(pgd_t *pgd) {}
23394 +#else
23395 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23396 {
23397 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23398 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23399 pgd_list_del(pgd);
23400 spin_unlock(&pgd_lock);
23401 }
23402 +#endif
23403
23404 /*
23405 * List of all pgd's needed for non-PAE so it can invalidate entries
23406 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23407 * -- wli
23408 */
23409
23410 -#ifdef CONFIG_X86_PAE
23411 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23412 /*
23413 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23414 * updating the top-level pagetable entries to guarantee the
23415 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23416 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23417 * and initialize the kernel pmds here.
23418 */
23419 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23420 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23421
23422 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23423 {
23424 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23425 */
23426 flush_tlb_mm(mm);
23427 }
23428 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23429 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23430 #else /* !CONFIG_X86_PAE */
23431
23432 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23433 -#define PREALLOCATED_PMDS 0
23434 +#define PREALLOCATED_PXDS 0
23435
23436 #endif /* CONFIG_X86_PAE */
23437
23438 -static void free_pmds(pmd_t *pmds[])
23439 +static void free_pxds(pxd_t *pxds[])
23440 {
23441 int i;
23442
23443 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23444 - if (pmds[i])
23445 - free_page((unsigned long)pmds[i]);
23446 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23447 + if (pxds[i])
23448 + free_page((unsigned long)pxds[i]);
23449 }
23450
23451 -static int preallocate_pmds(pmd_t *pmds[])
23452 +static int preallocate_pxds(pxd_t *pxds[])
23453 {
23454 int i;
23455 bool failed = false;
23456
23457 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23458 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23459 - if (pmd == NULL)
23460 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23461 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23462 + if (pxd == NULL)
23463 failed = true;
23464 - pmds[i] = pmd;
23465 + pxds[i] = pxd;
23466 }
23467
23468 if (failed) {
23469 - free_pmds(pmds);
23470 + free_pxds(pxds);
23471 return -ENOMEM;
23472 }
23473
23474 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23475 * preallocate which never got a corresponding vma will need to be
23476 * freed manually.
23477 */
23478 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23479 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23480 {
23481 int i;
23482
23483 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23484 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23485 pgd_t pgd = pgdp[i];
23486
23487 if (pgd_val(pgd) != 0) {
23488 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23489 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23490
23491 - pgdp[i] = native_make_pgd(0);
23492 + set_pgd(pgdp + i, native_make_pgd(0));
23493
23494 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23495 - pmd_free(mm, pmd);
23496 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23497 + pxd_free(mm, pxd);
23498 }
23499 }
23500 }
23501
23502 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23503 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23504 {
23505 - pud_t *pud;
23506 + pyd_t *pyd;
23507 unsigned long addr;
23508 int i;
23509
23510 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23511 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23512 return;
23513
23514 - pud = pud_offset(pgd, 0);
23515 +#ifdef CONFIG_X86_64
23516 + pyd = pyd_offset(mm, 0L);
23517 +#else
23518 + pyd = pyd_offset(pgd, 0L);
23519 +#endif
23520
23521 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23522 - i++, pud++, addr += PUD_SIZE) {
23523 - pmd_t *pmd = pmds[i];
23524 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23525 + i++, pyd++, addr += PYD_SIZE) {
23526 + pxd_t *pxd = pxds[i];
23527
23528 if (i >= KERNEL_PGD_BOUNDARY)
23529 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23530 - sizeof(pmd_t) * PTRS_PER_PMD);
23531 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23532 + sizeof(pxd_t) * PTRS_PER_PMD);
23533
23534 - pud_populate(mm, pud, pmd);
23535 + pyd_populate(mm, pyd, pxd);
23536 }
23537 }
23538
23539 pgd_t *pgd_alloc(struct mm_struct *mm)
23540 {
23541 pgd_t *pgd;
23542 - pmd_t *pmds[PREALLOCATED_PMDS];
23543 + pxd_t *pxds[PREALLOCATED_PXDS];
23544
23545 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23546
23547 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23548
23549 mm->pgd = pgd;
23550
23551 - if (preallocate_pmds(pmds) != 0)
23552 + if (preallocate_pxds(pxds) != 0)
23553 goto out_free_pgd;
23554
23555 if (paravirt_pgd_alloc(mm) != 0)
23556 - goto out_free_pmds;
23557 + goto out_free_pxds;
23558
23559 /*
23560 * Make sure that pre-populating the pmds is atomic with
23561 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23562 spin_lock(&pgd_lock);
23563
23564 pgd_ctor(mm, pgd);
23565 - pgd_prepopulate_pmd(mm, pgd, pmds);
23566 + pgd_prepopulate_pxd(mm, pgd, pxds);
23567
23568 spin_unlock(&pgd_lock);
23569
23570 return pgd;
23571
23572 -out_free_pmds:
23573 - free_pmds(pmds);
23574 +out_free_pxds:
23575 + free_pxds(pxds);
23576 out_free_pgd:
23577 free_page((unsigned long)pgd);
23578 out:
23579 @@ -295,7 +344,7 @@ out:
23580
23581 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23582 {
23583 - pgd_mop_up_pmds(mm, pgd);
23584 + pgd_mop_up_pxds(mm, pgd);
23585 pgd_dtor(pgd);
23586 paravirt_pgd_free(mm, pgd);
23587 free_page((unsigned long)pgd);
23588 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23589 index cac7184..09a39fa 100644
23590 --- a/arch/x86/mm/pgtable_32.c
23591 +++ b/arch/x86/mm/pgtable_32.c
23592 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23593 return;
23594 }
23595 pte = pte_offset_kernel(pmd, vaddr);
23596 +
23597 + pax_open_kernel();
23598 if (pte_val(pteval))
23599 set_pte_at(&init_mm, vaddr, pte, pteval);
23600 else
23601 pte_clear(&init_mm, vaddr, pte);
23602 + pax_close_kernel();
23603
23604 /*
23605 * It's enough to flush this one mapping.
23606 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23607 index 410531d..0f16030 100644
23608 --- a/arch/x86/mm/setup_nx.c
23609 +++ b/arch/x86/mm/setup_nx.c
23610 @@ -5,8 +5,10 @@
23611 #include <asm/pgtable.h>
23612 #include <asm/proto.h>
23613
23614 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23615 static int disable_nx __cpuinitdata;
23616
23617 +#ifndef CONFIG_PAX_PAGEEXEC
23618 /*
23619 * noexec = on|off
23620 *
23621 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23622 return 0;
23623 }
23624 early_param("noexec", noexec_setup);
23625 +#endif
23626 +
23627 +#endif
23628
23629 void __cpuinit x86_configure_nx(void)
23630 {
23631 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23632 if (cpu_has_nx && !disable_nx)
23633 __supported_pte_mask |= _PAGE_NX;
23634 else
23635 +#endif
23636 __supported_pte_mask &= ~_PAGE_NX;
23637 }
23638
23639 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23640 index d6c0418..06a0ad5 100644
23641 --- a/arch/x86/mm/tlb.c
23642 +++ b/arch/x86/mm/tlb.c
23643 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23644 BUG();
23645 cpumask_clear_cpu(cpu,
23646 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23647 +
23648 +#ifndef CONFIG_PAX_PER_CPU_PGD
23649 load_cr3(swapper_pg_dir);
23650 +#endif
23651 +
23652 }
23653 EXPORT_SYMBOL_GPL(leave_mm);
23654
23655 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23656 index 6687022..ceabcfa 100644
23657 --- a/arch/x86/net/bpf_jit.S
23658 +++ b/arch/x86/net/bpf_jit.S
23659 @@ -9,6 +9,7 @@
23660 */
23661 #include <linux/linkage.h>
23662 #include <asm/dwarf2.h>
23663 +#include <asm/alternative-asm.h>
23664
23665 /*
23666 * Calling convention :
23667 @@ -35,6 +36,7 @@ sk_load_word:
23668 jle bpf_slow_path_word
23669 mov (SKBDATA,%rsi),%eax
23670 bswap %eax /* ntohl() */
23671 + pax_force_retaddr
23672 ret
23673
23674
23675 @@ -53,6 +55,7 @@ sk_load_half:
23676 jle bpf_slow_path_half
23677 movzwl (SKBDATA,%rsi),%eax
23678 rol $8,%ax # ntohs()
23679 + pax_force_retaddr
23680 ret
23681
23682 sk_load_byte_ind:
23683 @@ -66,6 +69,7 @@ sk_load_byte:
23684 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23685 jle bpf_slow_path_byte
23686 movzbl (SKBDATA,%rsi),%eax
23687 + pax_force_retaddr
23688 ret
23689
23690 /**
23691 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23692 movzbl (SKBDATA,%rsi),%ebx
23693 and $15,%bl
23694 shl $2,%bl
23695 + pax_force_retaddr
23696 ret
23697 CFI_ENDPROC
23698 ENDPROC(sk_load_byte_msh)
23699 @@ -91,6 +96,7 @@ bpf_error:
23700 xor %eax,%eax
23701 mov -8(%rbp),%rbx
23702 leaveq
23703 + pax_force_retaddr
23704 ret
23705
23706 /* rsi contains offset and can be scratched */
23707 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23708 js bpf_error
23709 mov -12(%rbp),%eax
23710 bswap %eax
23711 + pax_force_retaddr
23712 ret
23713
23714 bpf_slow_path_half:
23715 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23716 mov -12(%rbp),%ax
23717 rol $8,%ax
23718 movzwl %ax,%eax
23719 + pax_force_retaddr
23720 ret
23721
23722 bpf_slow_path_byte:
23723 bpf_slow_path_common(1)
23724 js bpf_error
23725 movzbl -12(%rbp),%eax
23726 + pax_force_retaddr
23727 ret
23728
23729 bpf_slow_path_byte_msh:
23730 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23731 and $15,%al
23732 shl $2,%al
23733 xchg %eax,%ebx
23734 + pax_force_retaddr
23735 ret
23736 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23737 index 7c1b765..180e3b2 100644
23738 --- a/arch/x86/net/bpf_jit_comp.c
23739 +++ b/arch/x86/net/bpf_jit_comp.c
23740 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23741 set_fs(old_fs);
23742 }
23743
23744 +struct bpf_jit_work {
23745 + struct work_struct work;
23746 + void *image;
23747 +};
23748
23749 void bpf_jit_compile(struct sk_filter *fp)
23750 {
23751 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23752 if (addrs == NULL)
23753 return;
23754
23755 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23756 + if (!fp->work)
23757 + goto out;
23758 +
23759 /* Before first pass, make a rough estimation of addrs[]
23760 * each bpf instruction is translated to less than 64 bytes
23761 */
23762 @@ -592,11 +600,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23763 if (image) {
23764 if (unlikely(proglen + ilen > oldproglen)) {
23765 pr_err("bpb_jit_compile fatal error\n");
23766 - kfree(addrs);
23767 - module_free(NULL, image);
23768 - return;
23769 + module_free_exec(NULL, image);
23770 + goto out;
23771 }
23772 + pax_open_kernel();
23773 memcpy(image + proglen, temp, ilen);
23774 + pax_close_kernel();
23775 }
23776 proglen += ilen;
23777 addrs[i] = proglen;
23778 @@ -617,7 +626,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23779 break;
23780 }
23781 if (proglen == oldproglen) {
23782 - image = module_alloc(max_t(unsigned int,
23783 + image = module_alloc_exec(max_t(unsigned int,
23784 proglen,
23785 sizeof(struct work_struct)));
23786 if (!image)
23787 @@ -639,24 +648,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23788 fp->bpf_func = (void *)image;
23789 }
23790 out:
23791 + kfree(fp->work);
23792 kfree(addrs);
23793 return;
23794 }
23795
23796 static void jit_free_defer(struct work_struct *arg)
23797 {
23798 - module_free(NULL, arg);
23799 + module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23800 + kfree(arg);
23801 }
23802
23803 /* run from softirq, we must use a work_struct to call
23804 - * module_free() from process context
23805 + * module_free_exec() from process context
23806 */
23807 void bpf_jit_free(struct sk_filter *fp)
23808 {
23809 if (fp->bpf_func != sk_run_filter) {
23810 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
23811 + struct work_struct *work = &fp->work->work;
23812
23813 INIT_WORK(work, jit_free_defer);
23814 + fp->work->image = fp->bpf_func;
23815 schedule_work(work);
23816 }
23817 }
23818 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23819 index bff89df..377758a 100644
23820 --- a/arch/x86/oprofile/backtrace.c
23821 +++ b/arch/x86/oprofile/backtrace.c
23822 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23823 struct stack_frame_ia32 *fp;
23824 unsigned long bytes;
23825
23826 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23827 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23828 if (bytes != sizeof(bufhead))
23829 return NULL;
23830
23831 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23832 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23833
23834 oprofile_add_trace(bufhead[0].return_address);
23835
23836 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23837 struct stack_frame bufhead[2];
23838 unsigned long bytes;
23839
23840 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23841 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23842 if (bytes != sizeof(bufhead))
23843 return NULL;
23844
23845 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23846 {
23847 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23848
23849 - if (!user_mode_vm(regs)) {
23850 + if (!user_mode(regs)) {
23851 unsigned long stack = kernel_stack_pointer(regs);
23852 if (depth)
23853 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23854 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23855 index cb29191..036766d 100644
23856 --- a/arch/x86/pci/mrst.c
23857 +++ b/arch/x86/pci/mrst.c
23858 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23859 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23860 pci_mmcfg_late_init();
23861 pcibios_enable_irq = mrst_pci_irq_enable;
23862 - pci_root_ops = pci_mrst_ops;
23863 + pax_open_kernel();
23864 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23865 + pax_close_kernel();
23866 /* Continue with standard init */
23867 return 1;
23868 }
23869 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23870 index db0e9a5..8844dea 100644
23871 --- a/arch/x86/pci/pcbios.c
23872 +++ b/arch/x86/pci/pcbios.c
23873 @@ -79,50 +79,93 @@ union bios32 {
23874 static struct {
23875 unsigned long address;
23876 unsigned short segment;
23877 -} bios32_indirect = { 0, __KERNEL_CS };
23878 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23879
23880 /*
23881 * Returns the entry point for the given service, NULL on error
23882 */
23883
23884 -static unsigned long bios32_service(unsigned long service)
23885 +static unsigned long __devinit bios32_service(unsigned long service)
23886 {
23887 unsigned char return_code; /* %al */
23888 unsigned long address; /* %ebx */
23889 unsigned long length; /* %ecx */
23890 unsigned long entry; /* %edx */
23891 unsigned long flags;
23892 + struct desc_struct d, *gdt;
23893
23894 local_irq_save(flags);
23895 - __asm__("lcall *(%%edi); cld"
23896 +
23897 + gdt = get_cpu_gdt_table(smp_processor_id());
23898 +
23899 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23900 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23901 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23902 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23903 +
23904 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23905 : "=a" (return_code),
23906 "=b" (address),
23907 "=c" (length),
23908 "=d" (entry)
23909 : "0" (service),
23910 "1" (0),
23911 - "D" (&bios32_indirect));
23912 + "D" (&bios32_indirect),
23913 + "r"(__PCIBIOS_DS)
23914 + : "memory");
23915 +
23916 + pax_open_kernel();
23917 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23918 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23919 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23920 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23921 + pax_close_kernel();
23922 +
23923 local_irq_restore(flags);
23924
23925 switch (return_code) {
23926 - case 0:
23927 - return address + entry;
23928 - case 0x80: /* Not present */
23929 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23930 - return 0;
23931 - default: /* Shouldn't happen */
23932 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23933 - service, return_code);
23934 + case 0: {
23935 + int cpu;
23936 + unsigned char flags;
23937 +
23938 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23939 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23940 + printk(KERN_WARNING "bios32_service: not valid\n");
23941 return 0;
23942 + }
23943 + address = address + PAGE_OFFSET;
23944 + length += 16UL; /* some BIOSs underreport this... */
23945 + flags = 4;
23946 + if (length >= 64*1024*1024) {
23947 + length >>= PAGE_SHIFT;
23948 + flags |= 8;
23949 + }
23950 +
23951 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23952 + gdt = get_cpu_gdt_table(cpu);
23953 + pack_descriptor(&d, address, length, 0x9b, flags);
23954 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23955 + pack_descriptor(&d, address, length, 0x93, flags);
23956 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23957 + }
23958 + return entry;
23959 + }
23960 + case 0x80: /* Not present */
23961 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23962 + return 0;
23963 + default: /* Shouldn't happen */
23964 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23965 + service, return_code);
23966 + return 0;
23967 }
23968 }
23969
23970 static struct {
23971 unsigned long address;
23972 unsigned short segment;
23973 -} pci_indirect = { 0, __KERNEL_CS };
23974 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23975
23976 -static int pci_bios_present;
23977 +static int pci_bios_present __read_only;
23978
23979 static int __devinit check_pcibios(void)
23980 {
23981 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23982 unsigned long flags, pcibios_entry;
23983
23984 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23985 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23986 + pci_indirect.address = pcibios_entry;
23987
23988 local_irq_save(flags);
23989 - __asm__(
23990 - "lcall *(%%edi); cld\n\t"
23991 + __asm__("movw %w6, %%ds\n\t"
23992 + "lcall *%%ss:(%%edi); cld\n\t"
23993 + "push %%ss\n\t"
23994 + "pop %%ds\n\t"
23995 "jc 1f\n\t"
23996 "xor %%ah, %%ah\n"
23997 "1:"
23998 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23999 "=b" (ebx),
24000 "=c" (ecx)
24001 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24002 - "D" (&pci_indirect)
24003 + "D" (&pci_indirect),
24004 + "r" (__PCIBIOS_DS)
24005 : "memory");
24006 local_irq_restore(flags);
24007
24008 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24009
24010 switch (len) {
24011 case 1:
24012 - __asm__("lcall *(%%esi); cld\n\t"
24013 + __asm__("movw %w6, %%ds\n\t"
24014 + "lcall *%%ss:(%%esi); cld\n\t"
24015 + "push %%ss\n\t"
24016 + "pop %%ds\n\t"
24017 "jc 1f\n\t"
24018 "xor %%ah, %%ah\n"
24019 "1:"
24020 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24021 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24022 "b" (bx),
24023 "D" ((long)reg),
24024 - "S" (&pci_indirect));
24025 + "S" (&pci_indirect),
24026 + "r" (__PCIBIOS_DS));
24027 /*
24028 * Zero-extend the result beyond 8 bits, do not trust the
24029 * BIOS having done it:
24030 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24031 *value &= 0xff;
24032 break;
24033 case 2:
24034 - __asm__("lcall *(%%esi); cld\n\t"
24035 + __asm__("movw %w6, %%ds\n\t"
24036 + "lcall *%%ss:(%%esi); cld\n\t"
24037 + "push %%ss\n\t"
24038 + "pop %%ds\n\t"
24039 "jc 1f\n\t"
24040 "xor %%ah, %%ah\n"
24041 "1:"
24042 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24043 : "1" (PCIBIOS_READ_CONFIG_WORD),
24044 "b" (bx),
24045 "D" ((long)reg),
24046 - "S" (&pci_indirect));
24047 + "S" (&pci_indirect),
24048 + "r" (__PCIBIOS_DS));
24049 /*
24050 * Zero-extend the result beyond 16 bits, do not trust the
24051 * BIOS having done it:
24052 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24053 *value &= 0xffff;
24054 break;
24055 case 4:
24056 - __asm__("lcall *(%%esi); cld\n\t"
24057 + __asm__("movw %w6, %%ds\n\t"
24058 + "lcall *%%ss:(%%esi); cld\n\t"
24059 + "push %%ss\n\t"
24060 + "pop %%ds\n\t"
24061 "jc 1f\n\t"
24062 "xor %%ah, %%ah\n"
24063 "1:"
24064 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24065 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24066 "b" (bx),
24067 "D" ((long)reg),
24068 - "S" (&pci_indirect));
24069 + "S" (&pci_indirect),
24070 + "r" (__PCIBIOS_DS));
24071 break;
24072 }
24073
24074 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24075
24076 switch (len) {
24077 case 1:
24078 - __asm__("lcall *(%%esi); cld\n\t"
24079 + __asm__("movw %w6, %%ds\n\t"
24080 + "lcall *%%ss:(%%esi); cld\n\t"
24081 + "push %%ss\n\t"
24082 + "pop %%ds\n\t"
24083 "jc 1f\n\t"
24084 "xor %%ah, %%ah\n"
24085 "1:"
24086 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24087 "c" (value),
24088 "b" (bx),
24089 "D" ((long)reg),
24090 - "S" (&pci_indirect));
24091 + "S" (&pci_indirect),
24092 + "r" (__PCIBIOS_DS));
24093 break;
24094 case 2:
24095 - __asm__("lcall *(%%esi); cld\n\t"
24096 + __asm__("movw %w6, %%ds\n\t"
24097 + "lcall *%%ss:(%%esi); cld\n\t"
24098 + "push %%ss\n\t"
24099 + "pop %%ds\n\t"
24100 "jc 1f\n\t"
24101 "xor %%ah, %%ah\n"
24102 "1:"
24103 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24104 "c" (value),
24105 "b" (bx),
24106 "D" ((long)reg),
24107 - "S" (&pci_indirect));
24108 + "S" (&pci_indirect),
24109 + "r" (__PCIBIOS_DS));
24110 break;
24111 case 4:
24112 - __asm__("lcall *(%%esi); cld\n\t"
24113 + __asm__("movw %w6, %%ds\n\t"
24114 + "lcall *%%ss:(%%esi); cld\n\t"
24115 + "push %%ss\n\t"
24116 + "pop %%ds\n\t"
24117 "jc 1f\n\t"
24118 "xor %%ah, %%ah\n"
24119 "1:"
24120 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24121 "c" (value),
24122 "b" (bx),
24123 "D" ((long)reg),
24124 - "S" (&pci_indirect));
24125 + "S" (&pci_indirect),
24126 + "r" (__PCIBIOS_DS));
24127 break;
24128 }
24129
24130 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24131
24132 DBG("PCI: Fetching IRQ routing table... ");
24133 __asm__("push %%es\n\t"
24134 + "movw %w8, %%ds\n\t"
24135 "push %%ds\n\t"
24136 "pop %%es\n\t"
24137 - "lcall *(%%esi); cld\n\t"
24138 + "lcall *%%ss:(%%esi); cld\n\t"
24139 "pop %%es\n\t"
24140 + "push %%ss\n\t"
24141 + "pop %%ds\n"
24142 "jc 1f\n\t"
24143 "xor %%ah, %%ah\n"
24144 "1:"
24145 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24146 "1" (0),
24147 "D" ((long) &opt),
24148 "S" (&pci_indirect),
24149 - "m" (opt)
24150 + "m" (opt),
24151 + "r" (__PCIBIOS_DS)
24152 : "memory");
24153 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24154 if (ret & 0xff00)
24155 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24156 {
24157 int ret;
24158
24159 - __asm__("lcall *(%%esi); cld\n\t"
24160 + __asm__("movw %w5, %%ds\n\t"
24161 + "lcall *%%ss:(%%esi); cld\n\t"
24162 + "push %%ss\n\t"
24163 + "pop %%ds\n"
24164 "jc 1f\n\t"
24165 "xor %%ah, %%ah\n"
24166 "1:"
24167 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24168 : "0" (PCIBIOS_SET_PCI_HW_INT),
24169 "b" ((dev->bus->number << 8) | dev->devfn),
24170 "c" ((irq << 8) | (pin + 10)),
24171 - "S" (&pci_indirect));
24172 + "S" (&pci_indirect),
24173 + "r" (__PCIBIOS_DS));
24174 return !(ret & 0xff00);
24175 }
24176 EXPORT_SYMBOL(pcibios_set_irq_routing);
24177 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24178 index 40e4469..1ab536e 100644
24179 --- a/arch/x86/platform/efi/efi_32.c
24180 +++ b/arch/x86/platform/efi/efi_32.c
24181 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24182 {
24183 struct desc_ptr gdt_descr;
24184
24185 +#ifdef CONFIG_PAX_KERNEXEC
24186 + struct desc_struct d;
24187 +#endif
24188 +
24189 local_irq_save(efi_rt_eflags);
24190
24191 load_cr3(initial_page_table);
24192 __flush_tlb_all();
24193
24194 +#ifdef CONFIG_PAX_KERNEXEC
24195 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24196 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24197 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24198 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24199 +#endif
24200 +
24201 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24202 gdt_descr.size = GDT_SIZE - 1;
24203 load_gdt(&gdt_descr);
24204 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24205 {
24206 struct desc_ptr gdt_descr;
24207
24208 +#ifdef CONFIG_PAX_KERNEXEC
24209 + struct desc_struct d;
24210 +
24211 + memset(&d, 0, sizeof d);
24212 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24213 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24214 +#endif
24215 +
24216 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24217 gdt_descr.size = GDT_SIZE - 1;
24218 load_gdt(&gdt_descr);
24219 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24220 index fbe66e6..c5c0dd2 100644
24221 --- a/arch/x86/platform/efi/efi_stub_32.S
24222 +++ b/arch/x86/platform/efi/efi_stub_32.S
24223 @@ -6,7 +6,9 @@
24224 */
24225
24226 #include <linux/linkage.h>
24227 +#include <linux/init.h>
24228 #include <asm/page_types.h>
24229 +#include <asm/segment.h>
24230
24231 /*
24232 * efi_call_phys(void *, ...) is a function with variable parameters.
24233 @@ -20,7 +22,7 @@
24234 * service functions will comply with gcc calling convention, too.
24235 */
24236
24237 -.text
24238 +__INIT
24239 ENTRY(efi_call_phys)
24240 /*
24241 * 0. The function can only be called in Linux kernel. So CS has been
24242 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24243 * The mapping of lower virtual memory has been created in prelog and
24244 * epilog.
24245 */
24246 - movl $1f, %edx
24247 - subl $__PAGE_OFFSET, %edx
24248 - jmp *%edx
24249 + movl $(__KERNEXEC_EFI_DS), %edx
24250 + mov %edx, %ds
24251 + mov %edx, %es
24252 + mov %edx, %ss
24253 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24254 1:
24255
24256 /*
24257 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24258 * parameter 2, ..., param n. To make things easy, we save the return
24259 * address of efi_call_phys in a global variable.
24260 */
24261 - popl %edx
24262 - movl %edx, saved_return_addr
24263 - /* get the function pointer into ECX*/
24264 - popl %ecx
24265 - movl %ecx, efi_rt_function_ptr
24266 - movl $2f, %edx
24267 - subl $__PAGE_OFFSET, %edx
24268 - pushl %edx
24269 + popl (saved_return_addr)
24270 + popl (efi_rt_function_ptr)
24271
24272 /*
24273 * 3. Clear PG bit in %CR0.
24274 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24275 /*
24276 * 5. Call the physical function.
24277 */
24278 - jmp *%ecx
24279 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24280
24281 -2:
24282 /*
24283 * 6. After EFI runtime service returns, control will return to
24284 * following instruction. We'd better readjust stack pointer first.
24285 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24286 movl %cr0, %edx
24287 orl $0x80000000, %edx
24288 movl %edx, %cr0
24289 - jmp 1f
24290 -1:
24291 +
24292 /*
24293 * 8. Now restore the virtual mode from flat mode by
24294 * adding EIP with PAGE_OFFSET.
24295 */
24296 - movl $1f, %edx
24297 - jmp *%edx
24298 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24299 1:
24300 + movl $(__KERNEL_DS), %edx
24301 + mov %edx, %ds
24302 + mov %edx, %es
24303 + mov %edx, %ss
24304
24305 /*
24306 * 9. Balance the stack. And because EAX contain the return value,
24307 * we'd better not clobber it.
24308 */
24309 - leal efi_rt_function_ptr, %edx
24310 - movl (%edx), %ecx
24311 - pushl %ecx
24312 + pushl (efi_rt_function_ptr)
24313
24314 /*
24315 - * 10. Push the saved return address onto the stack and return.
24316 + * 10. Return to the saved return address.
24317 */
24318 - leal saved_return_addr, %edx
24319 - movl (%edx), %ecx
24320 - pushl %ecx
24321 - ret
24322 + jmpl *(saved_return_addr)
24323 ENDPROC(efi_call_phys)
24324 .previous
24325
24326 -.data
24327 +__INITDATA
24328 saved_return_addr:
24329 .long 0
24330 efi_rt_function_ptr:
24331 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24332 index 4c07cca..2c8427d 100644
24333 --- a/arch/x86/platform/efi/efi_stub_64.S
24334 +++ b/arch/x86/platform/efi/efi_stub_64.S
24335 @@ -7,6 +7,7 @@
24336 */
24337
24338 #include <linux/linkage.h>
24339 +#include <asm/alternative-asm.h>
24340
24341 #define SAVE_XMM \
24342 mov %rsp, %rax; \
24343 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24344 call *%rdi
24345 addq $32, %rsp
24346 RESTORE_XMM
24347 + pax_force_retaddr 0, 1
24348 ret
24349 ENDPROC(efi_call0)
24350
24351 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24352 call *%rdi
24353 addq $32, %rsp
24354 RESTORE_XMM
24355 + pax_force_retaddr 0, 1
24356 ret
24357 ENDPROC(efi_call1)
24358
24359 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24360 call *%rdi
24361 addq $32, %rsp
24362 RESTORE_XMM
24363 + pax_force_retaddr 0, 1
24364 ret
24365 ENDPROC(efi_call2)
24366
24367 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24368 call *%rdi
24369 addq $32, %rsp
24370 RESTORE_XMM
24371 + pax_force_retaddr 0, 1
24372 ret
24373 ENDPROC(efi_call3)
24374
24375 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24376 call *%rdi
24377 addq $32, %rsp
24378 RESTORE_XMM
24379 + pax_force_retaddr 0, 1
24380 ret
24381 ENDPROC(efi_call4)
24382
24383 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24384 call *%rdi
24385 addq $48, %rsp
24386 RESTORE_XMM
24387 + pax_force_retaddr 0, 1
24388 ret
24389 ENDPROC(efi_call5)
24390
24391 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24392 call *%rdi
24393 addq $48, %rsp
24394 RESTORE_XMM
24395 + pax_force_retaddr 0, 1
24396 ret
24397 ENDPROC(efi_call6)
24398 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24399 index ad4ec1c..686479e 100644
24400 --- a/arch/x86/platform/mrst/mrst.c
24401 +++ b/arch/x86/platform/mrst/mrst.c
24402 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24403 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24404 int sfi_mrtc_num;
24405
24406 -static void mrst_power_off(void)
24407 +static __noreturn void mrst_power_off(void)
24408 {
24409 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24410 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24411 + BUG();
24412 }
24413
24414 -static void mrst_reboot(void)
24415 +static __noreturn void mrst_reboot(void)
24416 {
24417 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24418 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24419 else
24420 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24421 + BUG();
24422 }
24423
24424 /* parse all the mtimer info to a static mtimer array */
24425 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24426 index f10c0af..3ec1f95 100644
24427 --- a/arch/x86/power/cpu.c
24428 +++ b/arch/x86/power/cpu.c
24429 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24430 static void fix_processor_context(void)
24431 {
24432 int cpu = smp_processor_id();
24433 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24434 + struct tss_struct *t = init_tss + cpu;
24435
24436 set_tss_desc(cpu, t); /*
24437 * This just modifies memory; should not be
24438 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24439 */
24440
24441 #ifdef CONFIG_X86_64
24442 + pax_open_kernel();
24443 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24444 + pax_close_kernel();
24445
24446 syscall_init(); /* This sets MSR_*STAR and related */
24447 #endif
24448 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24449 index 5d17950..2253fc9 100644
24450 --- a/arch/x86/vdso/Makefile
24451 +++ b/arch/x86/vdso/Makefile
24452 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24453 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24454 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24455
24456 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24457 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24458 GCOV_PROFILE := n
24459
24460 #
24461 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24462 index 468d591..8e80a0a 100644
24463 --- a/arch/x86/vdso/vdso32-setup.c
24464 +++ b/arch/x86/vdso/vdso32-setup.c
24465 @@ -25,6 +25,7 @@
24466 #include <asm/tlbflush.h>
24467 #include <asm/vdso.h>
24468 #include <asm/proto.h>
24469 +#include <asm/mman.h>
24470
24471 enum {
24472 VDSO_DISABLED = 0,
24473 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24474 void enable_sep_cpu(void)
24475 {
24476 int cpu = get_cpu();
24477 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24478 + struct tss_struct *tss = init_tss + cpu;
24479
24480 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24481 put_cpu();
24482 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24483 gate_vma.vm_start = FIXADDR_USER_START;
24484 gate_vma.vm_end = FIXADDR_USER_END;
24485 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24486 - gate_vma.vm_page_prot = __P101;
24487 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24488 /*
24489 * Make sure the vDSO gets into every core dump.
24490 * Dumping its contents makes post-mortem fully interpretable later
24491 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24492 if (compat)
24493 addr = VDSO_HIGH_BASE;
24494 else {
24495 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24496 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24497 if (IS_ERR_VALUE(addr)) {
24498 ret = addr;
24499 goto up_fail;
24500 }
24501 }
24502
24503 - current->mm->context.vdso = (void *)addr;
24504 + current->mm->context.vdso = addr;
24505
24506 if (compat_uses_vma || !compat) {
24507 /*
24508 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24509 }
24510
24511 current_thread_info()->sysenter_return =
24512 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24513 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24514
24515 up_fail:
24516 if (ret)
24517 - current->mm->context.vdso = NULL;
24518 + current->mm->context.vdso = 0;
24519
24520 up_write(&mm->mmap_sem);
24521
24522 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24523
24524 const char *arch_vma_name(struct vm_area_struct *vma)
24525 {
24526 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24527 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24528 return "[vdso]";
24529 +
24530 +#ifdef CONFIG_PAX_SEGMEXEC
24531 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24532 + return "[vdso]";
24533 +#endif
24534 +
24535 return NULL;
24536 }
24537
24538 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24539 * Check to see if the corresponding task was created in compat vdso
24540 * mode.
24541 */
24542 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24543 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24544 return &gate_vma;
24545 return NULL;
24546 }
24547 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24548 index 153407c..611cba9 100644
24549 --- a/arch/x86/vdso/vma.c
24550 +++ b/arch/x86/vdso/vma.c
24551 @@ -16,8 +16,6 @@
24552 #include <asm/vdso.h>
24553 #include <asm/page.h>
24554
24555 -unsigned int __read_mostly vdso_enabled = 1;
24556 -
24557 extern char vdso_start[], vdso_end[];
24558 extern unsigned short vdso_sync_cpuid;
24559
24560 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24561 * unaligned here as a result of stack start randomization.
24562 */
24563 addr = PAGE_ALIGN(addr);
24564 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24565
24566 return addr;
24567 }
24568 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24569 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24570 {
24571 struct mm_struct *mm = current->mm;
24572 - unsigned long addr;
24573 + unsigned long addr = 0;
24574 int ret;
24575
24576 - if (!vdso_enabled)
24577 - return 0;
24578 -
24579 down_write(&mm->mmap_sem);
24580 +
24581 +#ifdef CONFIG_PAX_RANDMMAP
24582 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24583 +#endif
24584 +
24585 addr = vdso_addr(mm->start_stack, vdso_size);
24586 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24587 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24588 if (IS_ERR_VALUE(addr)) {
24589 ret = addr;
24590 goto up_fail;
24591 }
24592
24593 - current->mm->context.vdso = (void *)addr;
24594 + mm->context.vdso = addr;
24595
24596 ret = install_special_mapping(mm, addr, vdso_size,
24597 VM_READ|VM_EXEC|
24598 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24599 VM_ALWAYSDUMP,
24600 vdso_pages);
24601 - if (ret) {
24602 - current->mm->context.vdso = NULL;
24603 - goto up_fail;
24604 - }
24605 +
24606 + if (ret)
24607 + mm->context.vdso = 0;
24608
24609 up_fail:
24610 up_write(&mm->mmap_sem);
24611 return ret;
24612 }
24613 -
24614 -static __init int vdso_setup(char *s)
24615 -{
24616 - vdso_enabled = simple_strtoul(s, NULL, 0);
24617 - return 0;
24618 -}
24619 -__setup("vdso=", vdso_setup);
24620 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24621 index 1f92865..c843b20 100644
24622 --- a/arch/x86/xen/enlighten.c
24623 +++ b/arch/x86/xen/enlighten.c
24624 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24625
24626 struct shared_info xen_dummy_shared_info;
24627
24628 -void *xen_initial_gdt;
24629 -
24630 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24631 __read_mostly int xen_have_vector_callback;
24632 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24633 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24634 #endif
24635 };
24636
24637 -static void xen_reboot(int reason)
24638 +static __noreturn void xen_reboot(int reason)
24639 {
24640 struct sched_shutdown r = { .reason = reason };
24641
24642 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24643 BUG();
24644 }
24645
24646 -static void xen_restart(char *msg)
24647 +static __noreturn void xen_restart(char *msg)
24648 {
24649 xen_reboot(SHUTDOWN_reboot);
24650 }
24651
24652 -static void xen_emergency_restart(void)
24653 +static __noreturn void xen_emergency_restart(void)
24654 {
24655 xen_reboot(SHUTDOWN_reboot);
24656 }
24657
24658 -static void xen_machine_halt(void)
24659 +static __noreturn void xen_machine_halt(void)
24660 {
24661 xen_reboot(SHUTDOWN_poweroff);
24662 }
24663 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24664 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24665
24666 /* Work out if we support NX */
24667 - x86_configure_nx();
24668 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24669 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24670 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24671 + unsigned l, h;
24672 +
24673 + __supported_pte_mask |= _PAGE_NX;
24674 + rdmsr(MSR_EFER, l, h);
24675 + l |= EFER_NX;
24676 + wrmsr(MSR_EFER, l, h);
24677 + }
24678 +#endif
24679
24680 xen_setup_features();
24681
24682 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24683
24684 machine_ops = xen_machine_ops;
24685
24686 - /*
24687 - * The only reliable way to retain the initial address of the
24688 - * percpu gdt_page is to remember it here, so we can go and
24689 - * mark it RW later, when the initial percpu area is freed.
24690 - */
24691 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24692 -
24693 xen_smp_init();
24694
24695 #ifdef CONFIG_ACPI_NUMA
24696 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24697 index 87f6673..e2555a6 100644
24698 --- a/arch/x86/xen/mmu.c
24699 +++ b/arch/x86/xen/mmu.c
24700 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24701 convert_pfn_mfn(init_level4_pgt);
24702 convert_pfn_mfn(level3_ident_pgt);
24703 convert_pfn_mfn(level3_kernel_pgt);
24704 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24705 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24706 + convert_pfn_mfn(level3_vmemmap_pgt);
24707
24708 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24709 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24710 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24711 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24712 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24713 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24714 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24715 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24716 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24717 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24718 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24719 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24720 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24721
24722 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24723 pv_mmu_ops.set_pud = xen_set_pud;
24724 #if PAGETABLE_LEVELS == 4
24725 pv_mmu_ops.set_pgd = xen_set_pgd;
24726 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24727 #endif
24728
24729 /* This will work as long as patching hasn't happened yet
24730 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24731 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24732 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24733 .set_pgd = xen_set_pgd_hyper,
24734 + .set_pgd_batched = xen_set_pgd_hyper,
24735
24736 .alloc_pud = xen_alloc_pmd_init,
24737 .release_pud = xen_release_pmd_init,
24738 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24739 index 041d4fe..7666b7e 100644
24740 --- a/arch/x86/xen/smp.c
24741 +++ b/arch/x86/xen/smp.c
24742 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24743 {
24744 BUG_ON(smp_processor_id() != 0);
24745 native_smp_prepare_boot_cpu();
24746 -
24747 - /* We've switched to the "real" per-cpu gdt, so make sure the
24748 - old memory can be recycled */
24749 - make_lowmem_page_readwrite(xen_initial_gdt);
24750 -
24751 xen_filter_cpu_maps();
24752 xen_setup_vcpu_info_placement();
24753 }
24754 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24755 gdt = get_cpu_gdt_table(cpu);
24756
24757 ctxt->flags = VGCF_IN_KERNEL;
24758 - ctxt->user_regs.ds = __USER_DS;
24759 - ctxt->user_regs.es = __USER_DS;
24760 + ctxt->user_regs.ds = __KERNEL_DS;
24761 + ctxt->user_regs.es = __KERNEL_DS;
24762 ctxt->user_regs.ss = __KERNEL_DS;
24763 #ifdef CONFIG_X86_32
24764 ctxt->user_regs.fs = __KERNEL_PERCPU;
24765 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24766 + savesegment(gs, ctxt->user_regs.gs);
24767 #else
24768 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24769 #endif
24770 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24771 int rc;
24772
24773 per_cpu(current_task, cpu) = idle;
24774 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24775 #ifdef CONFIG_X86_32
24776 irq_ctx_init(cpu);
24777 #else
24778 clear_tsk_thread_flag(idle, TIF_FORK);
24779 - per_cpu(kernel_stack, cpu) =
24780 - (unsigned long)task_stack_page(idle) -
24781 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24782 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24783 #endif
24784 xen_setup_runstate_info(cpu);
24785 xen_setup_timer(cpu);
24786 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24787 index b040b0e..8cc4fe0 100644
24788 --- a/arch/x86/xen/xen-asm_32.S
24789 +++ b/arch/x86/xen/xen-asm_32.S
24790 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24791 ESP_OFFSET=4 # bytes pushed onto stack
24792
24793 /*
24794 - * Store vcpu_info pointer for easy access. Do it this way to
24795 - * avoid having to reload %fs
24796 + * Store vcpu_info pointer for easy access.
24797 */
24798 #ifdef CONFIG_SMP
24799 - GET_THREAD_INFO(%eax)
24800 - movl TI_cpu(%eax), %eax
24801 - movl __per_cpu_offset(,%eax,4), %eax
24802 - mov xen_vcpu(%eax), %eax
24803 + push %fs
24804 + mov $(__KERNEL_PERCPU), %eax
24805 + mov %eax, %fs
24806 + mov PER_CPU_VAR(xen_vcpu), %eax
24807 + pop %fs
24808 #else
24809 movl xen_vcpu, %eax
24810 #endif
24811 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24812 index aaa7291..3f77960 100644
24813 --- a/arch/x86/xen/xen-head.S
24814 +++ b/arch/x86/xen/xen-head.S
24815 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24816 #ifdef CONFIG_X86_32
24817 mov %esi,xen_start_info
24818 mov $init_thread_union+THREAD_SIZE,%esp
24819 +#ifdef CONFIG_SMP
24820 + movl $cpu_gdt_table,%edi
24821 + movl $__per_cpu_load,%eax
24822 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24823 + rorl $16,%eax
24824 + movb %al,__KERNEL_PERCPU + 4(%edi)
24825 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24826 + movl $__per_cpu_end - 1,%eax
24827 + subl $__per_cpu_start,%eax
24828 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24829 +#endif
24830 #else
24831 mov %rsi,xen_start_info
24832 mov $init_thread_union+THREAD_SIZE,%rsp
24833 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24834 index b095739..8c17bcd 100644
24835 --- a/arch/x86/xen/xen-ops.h
24836 +++ b/arch/x86/xen/xen-ops.h
24837 @@ -10,8 +10,6 @@
24838 extern const char xen_hypervisor_callback[];
24839 extern const char xen_failsafe_callback[];
24840
24841 -extern void *xen_initial_gdt;
24842 -
24843 struct trap_info;
24844 void xen_copy_trap_info(struct trap_info *traps);
24845
24846 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24847 index 58916af..9cb880b 100644
24848 --- a/block/blk-iopoll.c
24849 +++ b/block/blk-iopoll.c
24850 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24851 }
24852 EXPORT_SYMBOL(blk_iopoll_complete);
24853
24854 -static void blk_iopoll_softirq(struct softirq_action *h)
24855 +static void blk_iopoll_softirq(void)
24856 {
24857 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24858 int rearm = 0, budget = blk_iopoll_budget;
24859 diff --git a/block/blk-map.c b/block/blk-map.c
24860 index 623e1cd..ca1e109 100644
24861 --- a/block/blk-map.c
24862 +++ b/block/blk-map.c
24863 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24864 if (!len || !kbuf)
24865 return -EINVAL;
24866
24867 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24868 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24869 if (do_copy)
24870 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24871 else
24872 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24873 index 1366a89..e17f54b 100644
24874 --- a/block/blk-softirq.c
24875 +++ b/block/blk-softirq.c
24876 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24877 * Softirq action handler - move entries to local list and loop over them
24878 * while passing them to the queue registered handler.
24879 */
24880 -static void blk_done_softirq(struct softirq_action *h)
24881 +static void blk_done_softirq(void)
24882 {
24883 struct list_head *cpu_list, local_list;
24884
24885 diff --git a/block/bsg.c b/block/bsg.c
24886 index 702f131..37808bf 100644
24887 --- a/block/bsg.c
24888 +++ b/block/bsg.c
24889 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24890 struct sg_io_v4 *hdr, struct bsg_device *bd,
24891 fmode_t has_write_perm)
24892 {
24893 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24894 + unsigned char *cmdptr;
24895 +
24896 if (hdr->request_len > BLK_MAX_CDB) {
24897 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24898 if (!rq->cmd)
24899 return -ENOMEM;
24900 - }
24901 + cmdptr = rq->cmd;
24902 + } else
24903 + cmdptr = tmpcmd;
24904
24905 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24906 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24907 hdr->request_len))
24908 return -EFAULT;
24909
24910 + if (cmdptr != rq->cmd)
24911 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24912 +
24913 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24914 if (blk_verify_command(rq->cmd, has_write_perm))
24915 return -EPERM;
24916 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24917 index 7b72502..646105c 100644
24918 --- a/block/compat_ioctl.c
24919 +++ b/block/compat_ioctl.c
24920 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24921 err |= __get_user(f->spec1, &uf->spec1);
24922 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24923 err |= __get_user(name, &uf->name);
24924 - f->name = compat_ptr(name);
24925 + f->name = (void __force_kernel *)compat_ptr(name);
24926 if (err) {
24927 err = -EFAULT;
24928 goto out;
24929 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24930 index 688be8a..8a37d98 100644
24931 --- a/block/scsi_ioctl.c
24932 +++ b/block/scsi_ioctl.c
24933 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24934 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24935 struct sg_io_hdr *hdr, fmode_t mode)
24936 {
24937 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24938 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24939 + unsigned char *cmdptr;
24940 +
24941 + if (rq->cmd != rq->__cmd)
24942 + cmdptr = rq->cmd;
24943 + else
24944 + cmdptr = tmpcmd;
24945 +
24946 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24947 return -EFAULT;
24948 +
24949 + if (cmdptr != rq->cmd)
24950 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24951 +
24952 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24953 return -EPERM;
24954
24955 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24956 int err;
24957 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24958 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24959 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24960 + unsigned char *cmdptr;
24961
24962 if (!sic)
24963 return -EINVAL;
24964 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24965 */
24966 err = -EFAULT;
24967 rq->cmd_len = cmdlen;
24968 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24969 +
24970 + if (rq->cmd != rq->__cmd)
24971 + cmdptr = rq->cmd;
24972 + else
24973 + cmdptr = tmpcmd;
24974 +
24975 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24976 goto error;
24977
24978 + if (rq->cmd != cmdptr)
24979 + memcpy(rq->cmd, cmdptr, cmdlen);
24980 +
24981 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24982 goto error;
24983
24984 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24985 index 671d4d6..5f24030 100644
24986 --- a/crypto/cryptd.c
24987 +++ b/crypto/cryptd.c
24988 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24989
24990 struct cryptd_blkcipher_request_ctx {
24991 crypto_completion_t complete;
24992 -};
24993 +} __no_const;
24994
24995 struct cryptd_hash_ctx {
24996 struct crypto_shash *child;
24997 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24998
24999 struct cryptd_aead_request_ctx {
25000 crypto_completion_t complete;
25001 -};
25002 +} __no_const;
25003
25004 static void cryptd_queue_worker(struct work_struct *work);
25005
25006 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25007 index 5d41894..22021e4 100644
25008 --- a/drivers/acpi/apei/cper.c
25009 +++ b/drivers/acpi/apei/cper.c
25010 @@ -38,12 +38,12 @@
25011 */
25012 u64 cper_next_record_id(void)
25013 {
25014 - static atomic64_t seq;
25015 + static atomic64_unchecked_t seq;
25016
25017 - if (!atomic64_read(&seq))
25018 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25019 + if (!atomic64_read_unchecked(&seq))
25020 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25021
25022 - return atomic64_inc_return(&seq);
25023 + return atomic64_inc_return_unchecked(&seq);
25024 }
25025 EXPORT_SYMBOL_GPL(cper_next_record_id);
25026
25027 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25028 index 6c47ae9..8ab9132 100644
25029 --- a/drivers/acpi/ec_sys.c
25030 +++ b/drivers/acpi/ec_sys.c
25031 @@ -12,6 +12,7 @@
25032 #include <linux/acpi.h>
25033 #include <linux/debugfs.h>
25034 #include <linux/module.h>
25035 +#include <asm/uaccess.h>
25036 #include "internal.h"
25037
25038 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25039 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25040 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25041 */
25042 unsigned int size = EC_SPACE_SIZE;
25043 - u8 *data = (u8 *) buf;
25044 + u8 data;
25045 loff_t init_off = *off;
25046 int err = 0;
25047
25048 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25049 size = count;
25050
25051 while (size) {
25052 - err = ec_read(*off, &data[*off - init_off]);
25053 + err = ec_read(*off, &data);
25054 if (err)
25055 return err;
25056 + if (put_user(data, &buf[*off - init_off]))
25057 + return -EFAULT;
25058 *off += 1;
25059 size--;
25060 }
25061 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25062
25063 unsigned int size = count;
25064 loff_t init_off = *off;
25065 - u8 *data = (u8 *) buf;
25066 int err = 0;
25067
25068 if (*off >= EC_SPACE_SIZE)
25069 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25070 }
25071
25072 while (size) {
25073 - u8 byte_write = data[*off - init_off];
25074 + u8 byte_write;
25075 + if (get_user(byte_write, &buf[*off - init_off]))
25076 + return -EFAULT;
25077 err = ec_write(*off, byte_write);
25078 if (err)
25079 return err;
25080 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25081 index 251c7b62..000462d 100644
25082 --- a/drivers/acpi/proc.c
25083 +++ b/drivers/acpi/proc.c
25084 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25085 size_t count, loff_t * ppos)
25086 {
25087 struct list_head *node, *next;
25088 - char strbuf[5];
25089 - char str[5] = "";
25090 - unsigned int len = count;
25091 + char strbuf[5] = {0};
25092
25093 - if (len > 4)
25094 - len = 4;
25095 - if (len < 0)
25096 + if (count > 4)
25097 + count = 4;
25098 + if (copy_from_user(strbuf, buffer, count))
25099 return -EFAULT;
25100 -
25101 - if (copy_from_user(strbuf, buffer, len))
25102 - return -EFAULT;
25103 - strbuf[len] = '\0';
25104 - sscanf(strbuf, "%s", str);
25105 + strbuf[count] = '\0';
25106
25107 mutex_lock(&acpi_device_lock);
25108 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25109 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25110 if (!dev->wakeup.flags.valid)
25111 continue;
25112
25113 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25114 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25115 if (device_can_wakeup(&dev->dev)) {
25116 bool enable = !device_may_wakeup(&dev->dev);
25117 device_set_wakeup_enable(&dev->dev, enable);
25118 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25119 index 9d7bc9f..a6fc091 100644
25120 --- a/drivers/acpi/processor_driver.c
25121 +++ b/drivers/acpi/processor_driver.c
25122 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25123 return 0;
25124 #endif
25125
25126 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25127 + BUG_ON(pr->id >= nr_cpu_ids);
25128
25129 /*
25130 * Buggy BIOS check
25131 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25132 index c04ad68..0b99473 100644
25133 --- a/drivers/ata/libata-core.c
25134 +++ b/drivers/ata/libata-core.c
25135 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25136 struct ata_port *ap;
25137 unsigned int tag;
25138
25139 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25140 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25141 ap = qc->ap;
25142
25143 qc->flags = 0;
25144 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25145 struct ata_port *ap;
25146 struct ata_link *link;
25147
25148 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25149 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25150 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25151 ap = qc->ap;
25152 link = qc->dev->link;
25153 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25154 return;
25155
25156 spin_lock(&lock);
25157 + pax_open_kernel();
25158
25159 for (cur = ops->inherits; cur; cur = cur->inherits) {
25160 void **inherit = (void **)cur;
25161 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25162 if (IS_ERR(*pp))
25163 *pp = NULL;
25164
25165 - ops->inherits = NULL;
25166 + *(struct ata_port_operations **)&ops->inherits = NULL;
25167
25168 + pax_close_kernel();
25169 spin_unlock(&lock);
25170 }
25171
25172 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25173 index e8574bb..f9f6a72 100644
25174 --- a/drivers/ata/pata_arasan_cf.c
25175 +++ b/drivers/ata/pata_arasan_cf.c
25176 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25177 /* Handle platform specific quirks */
25178 if (pdata->quirk) {
25179 if (pdata->quirk & CF_BROKEN_PIO) {
25180 - ap->ops->set_piomode = NULL;
25181 + pax_open_kernel();
25182 + *(void **)&ap->ops->set_piomode = NULL;
25183 + pax_close_kernel();
25184 ap->pio_mask = 0;
25185 }
25186 if (pdata->quirk & CF_BROKEN_MWDMA)
25187 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25188 index f9b983a..887b9d8 100644
25189 --- a/drivers/atm/adummy.c
25190 +++ b/drivers/atm/adummy.c
25191 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25192 vcc->pop(vcc, skb);
25193 else
25194 dev_kfree_skb_any(skb);
25195 - atomic_inc(&vcc->stats->tx);
25196 + atomic_inc_unchecked(&vcc->stats->tx);
25197
25198 return 0;
25199 }
25200 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25201 index f8f41e0..1f987dd 100644
25202 --- a/drivers/atm/ambassador.c
25203 +++ b/drivers/atm/ambassador.c
25204 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25205 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25206
25207 // VC layer stats
25208 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25209 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25210
25211 // free the descriptor
25212 kfree (tx_descr);
25213 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25214 dump_skb ("<<<", vc, skb);
25215
25216 // VC layer stats
25217 - atomic_inc(&atm_vcc->stats->rx);
25218 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25219 __net_timestamp(skb);
25220 // end of our responsibility
25221 atm_vcc->push (atm_vcc, skb);
25222 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25223 } else {
25224 PRINTK (KERN_INFO, "dropped over-size frame");
25225 // should we count this?
25226 - atomic_inc(&atm_vcc->stats->rx_drop);
25227 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25228 }
25229
25230 } else {
25231 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25232 }
25233
25234 if (check_area (skb->data, skb->len)) {
25235 - atomic_inc(&atm_vcc->stats->tx_err);
25236 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25237 return -ENOMEM; // ?
25238 }
25239
25240 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25241 index b22d71c..d6e1049 100644
25242 --- a/drivers/atm/atmtcp.c
25243 +++ b/drivers/atm/atmtcp.c
25244 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25245 if (vcc->pop) vcc->pop(vcc,skb);
25246 else dev_kfree_skb(skb);
25247 if (dev_data) return 0;
25248 - atomic_inc(&vcc->stats->tx_err);
25249 + atomic_inc_unchecked(&vcc->stats->tx_err);
25250 return -ENOLINK;
25251 }
25252 size = skb->len+sizeof(struct atmtcp_hdr);
25253 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25254 if (!new_skb) {
25255 if (vcc->pop) vcc->pop(vcc,skb);
25256 else dev_kfree_skb(skb);
25257 - atomic_inc(&vcc->stats->tx_err);
25258 + atomic_inc_unchecked(&vcc->stats->tx_err);
25259 return -ENOBUFS;
25260 }
25261 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25262 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25263 if (vcc->pop) vcc->pop(vcc,skb);
25264 else dev_kfree_skb(skb);
25265 out_vcc->push(out_vcc,new_skb);
25266 - atomic_inc(&vcc->stats->tx);
25267 - atomic_inc(&out_vcc->stats->rx);
25268 + atomic_inc_unchecked(&vcc->stats->tx);
25269 + atomic_inc_unchecked(&out_vcc->stats->rx);
25270 return 0;
25271 }
25272
25273 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25274 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25275 read_unlock(&vcc_sklist_lock);
25276 if (!out_vcc) {
25277 - atomic_inc(&vcc->stats->tx_err);
25278 + atomic_inc_unchecked(&vcc->stats->tx_err);
25279 goto done;
25280 }
25281 skb_pull(skb,sizeof(struct atmtcp_hdr));
25282 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25283 __net_timestamp(new_skb);
25284 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25285 out_vcc->push(out_vcc,new_skb);
25286 - atomic_inc(&vcc->stats->tx);
25287 - atomic_inc(&out_vcc->stats->rx);
25288 + atomic_inc_unchecked(&vcc->stats->tx);
25289 + atomic_inc_unchecked(&out_vcc->stats->rx);
25290 done:
25291 if (vcc->pop) vcc->pop(vcc,skb);
25292 else dev_kfree_skb(skb);
25293 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25294 index 956e9ac..133516d 100644
25295 --- a/drivers/atm/eni.c
25296 +++ b/drivers/atm/eni.c
25297 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25298 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25299 vcc->dev->number);
25300 length = 0;
25301 - atomic_inc(&vcc->stats->rx_err);
25302 + atomic_inc_unchecked(&vcc->stats->rx_err);
25303 }
25304 else {
25305 length = ATM_CELL_SIZE-1; /* no HEC */
25306 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25307 size);
25308 }
25309 eff = length = 0;
25310 - atomic_inc(&vcc->stats->rx_err);
25311 + atomic_inc_unchecked(&vcc->stats->rx_err);
25312 }
25313 else {
25314 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25315 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25316 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25317 vcc->dev->number,vcc->vci,length,size << 2,descr);
25318 length = eff = 0;
25319 - atomic_inc(&vcc->stats->rx_err);
25320 + atomic_inc_unchecked(&vcc->stats->rx_err);
25321 }
25322 }
25323 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25324 @@ -771,7 +771,7 @@ rx_dequeued++;
25325 vcc->push(vcc,skb);
25326 pushed++;
25327 }
25328 - atomic_inc(&vcc->stats->rx);
25329 + atomic_inc_unchecked(&vcc->stats->rx);
25330 }
25331 wake_up(&eni_dev->rx_wait);
25332 }
25333 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25334 PCI_DMA_TODEVICE);
25335 if (vcc->pop) vcc->pop(vcc,skb);
25336 else dev_kfree_skb_irq(skb);
25337 - atomic_inc(&vcc->stats->tx);
25338 + atomic_inc_unchecked(&vcc->stats->tx);
25339 wake_up(&eni_dev->tx_wait);
25340 dma_complete++;
25341 }
25342 @@ -1569,7 +1569,7 @@ tx_complete++;
25343 /*--------------------------------- entries ---------------------------------*/
25344
25345
25346 -static const char *media_name[] __devinitdata = {
25347 +static const char *media_name[] __devinitconst = {
25348 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25349 "UTP", "05?", "06?", "07?", /* 4- 7 */
25350 "TAXI","09?", "10?", "11?", /* 8-11 */
25351 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25352 index 5072f8a..fa52520d 100644
25353 --- a/drivers/atm/firestream.c
25354 +++ b/drivers/atm/firestream.c
25355 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25356 }
25357 }
25358
25359 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25360 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25361
25362 fs_dprintk (FS_DEBUG_TXMEM, "i");
25363 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25364 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25365 #endif
25366 skb_put (skb, qe->p1 & 0xffff);
25367 ATM_SKB(skb)->vcc = atm_vcc;
25368 - atomic_inc(&atm_vcc->stats->rx);
25369 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25370 __net_timestamp(skb);
25371 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25372 atm_vcc->push (atm_vcc, skb);
25373 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25374 kfree (pe);
25375 }
25376 if (atm_vcc)
25377 - atomic_inc(&atm_vcc->stats->rx_drop);
25378 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25379 break;
25380 case 0x1f: /* Reassembly abort: no buffers. */
25381 /* Silently increment error counter. */
25382 if (atm_vcc)
25383 - atomic_inc(&atm_vcc->stats->rx_drop);
25384 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25385 break;
25386 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25387 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25388 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25389 index 361f5ae..7fc552d 100644
25390 --- a/drivers/atm/fore200e.c
25391 +++ b/drivers/atm/fore200e.c
25392 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25393 #endif
25394 /* check error condition */
25395 if (*entry->status & STATUS_ERROR)
25396 - atomic_inc(&vcc->stats->tx_err);
25397 + atomic_inc_unchecked(&vcc->stats->tx_err);
25398 else
25399 - atomic_inc(&vcc->stats->tx);
25400 + atomic_inc_unchecked(&vcc->stats->tx);
25401 }
25402 }
25403
25404 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25405 if (skb == NULL) {
25406 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25407
25408 - atomic_inc(&vcc->stats->rx_drop);
25409 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25410 return -ENOMEM;
25411 }
25412
25413 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25414
25415 dev_kfree_skb_any(skb);
25416
25417 - atomic_inc(&vcc->stats->rx_drop);
25418 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25419 return -ENOMEM;
25420 }
25421
25422 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25423
25424 vcc->push(vcc, skb);
25425 - atomic_inc(&vcc->stats->rx);
25426 + atomic_inc_unchecked(&vcc->stats->rx);
25427
25428 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25429
25430 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25431 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25432 fore200e->atm_dev->number,
25433 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25434 - atomic_inc(&vcc->stats->rx_err);
25435 + atomic_inc_unchecked(&vcc->stats->rx_err);
25436 }
25437 }
25438
25439 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25440 goto retry_here;
25441 }
25442
25443 - atomic_inc(&vcc->stats->tx_err);
25444 + atomic_inc_unchecked(&vcc->stats->tx_err);
25445
25446 fore200e->tx_sat++;
25447 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25448 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25449 index 9a51df4..f3bb5f8 100644
25450 --- a/drivers/atm/he.c
25451 +++ b/drivers/atm/he.c
25452 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25453
25454 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25455 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25456 - atomic_inc(&vcc->stats->rx_drop);
25457 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25458 goto return_host_buffers;
25459 }
25460
25461 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25462 RBRQ_LEN_ERR(he_dev->rbrq_head)
25463 ? "LEN_ERR" : "",
25464 vcc->vpi, vcc->vci);
25465 - atomic_inc(&vcc->stats->rx_err);
25466 + atomic_inc_unchecked(&vcc->stats->rx_err);
25467 goto return_host_buffers;
25468 }
25469
25470 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25471 vcc->push(vcc, skb);
25472 spin_lock(&he_dev->global_lock);
25473
25474 - atomic_inc(&vcc->stats->rx);
25475 + atomic_inc_unchecked(&vcc->stats->rx);
25476
25477 return_host_buffers:
25478 ++pdus_assembled;
25479 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25480 tpd->vcc->pop(tpd->vcc, tpd->skb);
25481 else
25482 dev_kfree_skb_any(tpd->skb);
25483 - atomic_inc(&tpd->vcc->stats->tx_err);
25484 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25485 }
25486 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25487 return;
25488 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25489 vcc->pop(vcc, skb);
25490 else
25491 dev_kfree_skb_any(skb);
25492 - atomic_inc(&vcc->stats->tx_err);
25493 + atomic_inc_unchecked(&vcc->stats->tx_err);
25494 return -EINVAL;
25495 }
25496
25497 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25498 vcc->pop(vcc, skb);
25499 else
25500 dev_kfree_skb_any(skb);
25501 - atomic_inc(&vcc->stats->tx_err);
25502 + atomic_inc_unchecked(&vcc->stats->tx_err);
25503 return -EINVAL;
25504 }
25505 #endif
25506 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25507 vcc->pop(vcc, skb);
25508 else
25509 dev_kfree_skb_any(skb);
25510 - atomic_inc(&vcc->stats->tx_err);
25511 + atomic_inc_unchecked(&vcc->stats->tx_err);
25512 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25513 return -ENOMEM;
25514 }
25515 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25516 vcc->pop(vcc, skb);
25517 else
25518 dev_kfree_skb_any(skb);
25519 - atomic_inc(&vcc->stats->tx_err);
25520 + atomic_inc_unchecked(&vcc->stats->tx_err);
25521 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25522 return -ENOMEM;
25523 }
25524 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25525 __enqueue_tpd(he_dev, tpd, cid);
25526 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25527
25528 - atomic_inc(&vcc->stats->tx);
25529 + atomic_inc_unchecked(&vcc->stats->tx);
25530
25531 return 0;
25532 }
25533 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25534 index b812103..e391a49 100644
25535 --- a/drivers/atm/horizon.c
25536 +++ b/drivers/atm/horizon.c
25537 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25538 {
25539 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25540 // VC layer stats
25541 - atomic_inc(&vcc->stats->rx);
25542 + atomic_inc_unchecked(&vcc->stats->rx);
25543 __net_timestamp(skb);
25544 // end of our responsibility
25545 vcc->push (vcc, skb);
25546 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25547 dev->tx_iovec = NULL;
25548
25549 // VC layer stats
25550 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25551 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25552
25553 // free the skb
25554 hrz_kfree_skb (skb);
25555 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25556 index 1c05212..c28e200 100644
25557 --- a/drivers/atm/idt77252.c
25558 +++ b/drivers/atm/idt77252.c
25559 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25560 else
25561 dev_kfree_skb(skb);
25562
25563 - atomic_inc(&vcc->stats->tx);
25564 + atomic_inc_unchecked(&vcc->stats->tx);
25565 }
25566
25567 atomic_dec(&scq->used);
25568 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25569 if ((sb = dev_alloc_skb(64)) == NULL) {
25570 printk("%s: Can't allocate buffers for aal0.\n",
25571 card->name);
25572 - atomic_add(i, &vcc->stats->rx_drop);
25573 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25574 break;
25575 }
25576 if (!atm_charge(vcc, sb->truesize)) {
25577 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25578 card->name);
25579 - atomic_add(i - 1, &vcc->stats->rx_drop);
25580 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25581 dev_kfree_skb(sb);
25582 break;
25583 }
25584 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25585 ATM_SKB(sb)->vcc = vcc;
25586 __net_timestamp(sb);
25587 vcc->push(vcc, sb);
25588 - atomic_inc(&vcc->stats->rx);
25589 + atomic_inc_unchecked(&vcc->stats->rx);
25590
25591 cell += ATM_CELL_PAYLOAD;
25592 }
25593 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25594 "(CDC: %08x)\n",
25595 card->name, len, rpp->len, readl(SAR_REG_CDC));
25596 recycle_rx_pool_skb(card, rpp);
25597 - atomic_inc(&vcc->stats->rx_err);
25598 + atomic_inc_unchecked(&vcc->stats->rx_err);
25599 return;
25600 }
25601 if (stat & SAR_RSQE_CRC) {
25602 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25603 recycle_rx_pool_skb(card, rpp);
25604 - atomic_inc(&vcc->stats->rx_err);
25605 + atomic_inc_unchecked(&vcc->stats->rx_err);
25606 return;
25607 }
25608 if (skb_queue_len(&rpp->queue) > 1) {
25609 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25610 RXPRINTK("%s: Can't alloc RX skb.\n",
25611 card->name);
25612 recycle_rx_pool_skb(card, rpp);
25613 - atomic_inc(&vcc->stats->rx_err);
25614 + atomic_inc_unchecked(&vcc->stats->rx_err);
25615 return;
25616 }
25617 if (!atm_charge(vcc, skb->truesize)) {
25618 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25619 __net_timestamp(skb);
25620
25621 vcc->push(vcc, skb);
25622 - atomic_inc(&vcc->stats->rx);
25623 + atomic_inc_unchecked(&vcc->stats->rx);
25624
25625 return;
25626 }
25627 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25628 __net_timestamp(skb);
25629
25630 vcc->push(vcc, skb);
25631 - atomic_inc(&vcc->stats->rx);
25632 + atomic_inc_unchecked(&vcc->stats->rx);
25633
25634 if (skb->truesize > SAR_FB_SIZE_3)
25635 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25636 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25637 if (vcc->qos.aal != ATM_AAL0) {
25638 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25639 card->name, vpi, vci);
25640 - atomic_inc(&vcc->stats->rx_drop);
25641 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25642 goto drop;
25643 }
25644
25645 if ((sb = dev_alloc_skb(64)) == NULL) {
25646 printk("%s: Can't allocate buffers for AAL0.\n",
25647 card->name);
25648 - atomic_inc(&vcc->stats->rx_err);
25649 + atomic_inc_unchecked(&vcc->stats->rx_err);
25650 goto drop;
25651 }
25652
25653 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25654 ATM_SKB(sb)->vcc = vcc;
25655 __net_timestamp(sb);
25656 vcc->push(vcc, sb);
25657 - atomic_inc(&vcc->stats->rx);
25658 + atomic_inc_unchecked(&vcc->stats->rx);
25659
25660 drop:
25661 skb_pull(queue, 64);
25662 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25663
25664 if (vc == NULL) {
25665 printk("%s: NULL connection in send().\n", card->name);
25666 - atomic_inc(&vcc->stats->tx_err);
25667 + atomic_inc_unchecked(&vcc->stats->tx_err);
25668 dev_kfree_skb(skb);
25669 return -EINVAL;
25670 }
25671 if (!test_bit(VCF_TX, &vc->flags)) {
25672 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25673 - atomic_inc(&vcc->stats->tx_err);
25674 + atomic_inc_unchecked(&vcc->stats->tx_err);
25675 dev_kfree_skb(skb);
25676 return -EINVAL;
25677 }
25678 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25679 break;
25680 default:
25681 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25682 - atomic_inc(&vcc->stats->tx_err);
25683 + atomic_inc_unchecked(&vcc->stats->tx_err);
25684 dev_kfree_skb(skb);
25685 return -EINVAL;
25686 }
25687
25688 if (skb_shinfo(skb)->nr_frags != 0) {
25689 printk("%s: No scatter-gather yet.\n", card->name);
25690 - atomic_inc(&vcc->stats->tx_err);
25691 + atomic_inc_unchecked(&vcc->stats->tx_err);
25692 dev_kfree_skb(skb);
25693 return -EINVAL;
25694 }
25695 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25696
25697 err = queue_skb(card, vc, skb, oam);
25698 if (err) {
25699 - atomic_inc(&vcc->stats->tx_err);
25700 + atomic_inc_unchecked(&vcc->stats->tx_err);
25701 dev_kfree_skb(skb);
25702 return err;
25703 }
25704 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25705 skb = dev_alloc_skb(64);
25706 if (!skb) {
25707 printk("%s: Out of memory in send_oam().\n", card->name);
25708 - atomic_inc(&vcc->stats->tx_err);
25709 + atomic_inc_unchecked(&vcc->stats->tx_err);
25710 return -ENOMEM;
25711 }
25712 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25713 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25714 index 3d0c2b0..45441fa 100644
25715 --- a/drivers/atm/iphase.c
25716 +++ b/drivers/atm/iphase.c
25717 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25718 status = (u_short) (buf_desc_ptr->desc_mode);
25719 if (status & (RX_CER | RX_PTE | RX_OFL))
25720 {
25721 - atomic_inc(&vcc->stats->rx_err);
25722 + atomic_inc_unchecked(&vcc->stats->rx_err);
25723 IF_ERR(printk("IA: bad packet, dropping it");)
25724 if (status & RX_CER) {
25725 IF_ERR(printk(" cause: packet CRC error\n");)
25726 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25727 len = dma_addr - buf_addr;
25728 if (len > iadev->rx_buf_sz) {
25729 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25730 - atomic_inc(&vcc->stats->rx_err);
25731 + atomic_inc_unchecked(&vcc->stats->rx_err);
25732 goto out_free_desc;
25733 }
25734
25735 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25736 ia_vcc = INPH_IA_VCC(vcc);
25737 if (ia_vcc == NULL)
25738 {
25739 - atomic_inc(&vcc->stats->rx_err);
25740 + atomic_inc_unchecked(&vcc->stats->rx_err);
25741 dev_kfree_skb_any(skb);
25742 atm_return(vcc, atm_guess_pdu2truesize(len));
25743 goto INCR_DLE;
25744 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25745 if ((length > iadev->rx_buf_sz) || (length >
25746 (skb->len - sizeof(struct cpcs_trailer))))
25747 {
25748 - atomic_inc(&vcc->stats->rx_err);
25749 + atomic_inc_unchecked(&vcc->stats->rx_err);
25750 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25751 length, skb->len);)
25752 dev_kfree_skb_any(skb);
25753 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25754
25755 IF_RX(printk("rx_dle_intr: skb push");)
25756 vcc->push(vcc,skb);
25757 - atomic_inc(&vcc->stats->rx);
25758 + atomic_inc_unchecked(&vcc->stats->rx);
25759 iadev->rx_pkt_cnt++;
25760 }
25761 INCR_DLE:
25762 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25763 {
25764 struct k_sonet_stats *stats;
25765 stats = &PRIV(_ia_dev[board])->sonet_stats;
25766 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25767 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25768 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25769 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25770 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25771 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25772 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25773 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25774 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25775 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25776 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25777 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25778 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25779 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25780 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25781 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25782 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25783 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25784 }
25785 ia_cmds.status = 0;
25786 break;
25787 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25788 if ((desc == 0) || (desc > iadev->num_tx_desc))
25789 {
25790 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25791 - atomic_inc(&vcc->stats->tx);
25792 + atomic_inc_unchecked(&vcc->stats->tx);
25793 if (vcc->pop)
25794 vcc->pop(vcc, skb);
25795 else
25796 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25797 ATM_DESC(skb) = vcc->vci;
25798 skb_queue_tail(&iadev->tx_dma_q, skb);
25799
25800 - atomic_inc(&vcc->stats->tx);
25801 + atomic_inc_unchecked(&vcc->stats->tx);
25802 iadev->tx_pkt_cnt++;
25803 /* Increment transaction counter */
25804 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25805
25806 #if 0
25807 /* add flow control logic */
25808 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25809 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25810 if (iavcc->vc_desc_cnt > 10) {
25811 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25812 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25813 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25814 index f556969..0da15eb 100644
25815 --- a/drivers/atm/lanai.c
25816 +++ b/drivers/atm/lanai.c
25817 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25818 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25819 lanai_endtx(lanai, lvcc);
25820 lanai_free_skb(lvcc->tx.atmvcc, skb);
25821 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25822 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25823 }
25824
25825 /* Try to fill the buffer - don't call unless there is backlog */
25826 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25827 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25828 __net_timestamp(skb);
25829 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25830 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25831 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25832 out:
25833 lvcc->rx.buf.ptr = end;
25834 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25835 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25836 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25837 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25838 lanai->stats.service_rxnotaal5++;
25839 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25840 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25841 return 0;
25842 }
25843 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25844 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25845 int bytes;
25846 read_unlock(&vcc_sklist_lock);
25847 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25848 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25849 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25850 lvcc->stats.x.aal5.service_trash++;
25851 bytes = (SERVICE_GET_END(s) * 16) -
25852 (((unsigned long) lvcc->rx.buf.ptr) -
25853 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25854 }
25855 if (s & SERVICE_STREAM) {
25856 read_unlock(&vcc_sklist_lock);
25857 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25858 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25859 lvcc->stats.x.aal5.service_stream++;
25860 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25861 "PDU on VCI %d!\n", lanai->number, vci);
25862 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25863 return 0;
25864 }
25865 DPRINTK("got rx crc error on vci %d\n", vci);
25866 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25867 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25868 lvcc->stats.x.aal5.service_rxcrc++;
25869 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25870 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25871 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25872 index 1c70c45..300718d 100644
25873 --- a/drivers/atm/nicstar.c
25874 +++ b/drivers/atm/nicstar.c
25875 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25876 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25877 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25878 card->index);
25879 - atomic_inc(&vcc->stats->tx_err);
25880 + atomic_inc_unchecked(&vcc->stats->tx_err);
25881 dev_kfree_skb_any(skb);
25882 return -EINVAL;
25883 }
25884 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25885 if (!vc->tx) {
25886 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25887 card->index);
25888 - atomic_inc(&vcc->stats->tx_err);
25889 + atomic_inc_unchecked(&vcc->stats->tx_err);
25890 dev_kfree_skb_any(skb);
25891 return -EINVAL;
25892 }
25893 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25894 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25895 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25896 card->index);
25897 - atomic_inc(&vcc->stats->tx_err);
25898 + atomic_inc_unchecked(&vcc->stats->tx_err);
25899 dev_kfree_skb_any(skb);
25900 return -EINVAL;
25901 }
25902
25903 if (skb_shinfo(skb)->nr_frags != 0) {
25904 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25905 - atomic_inc(&vcc->stats->tx_err);
25906 + atomic_inc_unchecked(&vcc->stats->tx_err);
25907 dev_kfree_skb_any(skb);
25908 return -EINVAL;
25909 }
25910 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25911 }
25912
25913 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25914 - atomic_inc(&vcc->stats->tx_err);
25915 + atomic_inc_unchecked(&vcc->stats->tx_err);
25916 dev_kfree_skb_any(skb);
25917 return -EIO;
25918 }
25919 - atomic_inc(&vcc->stats->tx);
25920 + atomic_inc_unchecked(&vcc->stats->tx);
25921
25922 return 0;
25923 }
25924 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25925 printk
25926 ("nicstar%d: Can't allocate buffers for aal0.\n",
25927 card->index);
25928 - atomic_add(i, &vcc->stats->rx_drop);
25929 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25930 break;
25931 }
25932 if (!atm_charge(vcc, sb->truesize)) {
25933 RXPRINTK
25934 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25935 card->index);
25936 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25937 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25938 dev_kfree_skb_any(sb);
25939 break;
25940 }
25941 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25942 ATM_SKB(sb)->vcc = vcc;
25943 __net_timestamp(sb);
25944 vcc->push(vcc, sb);
25945 - atomic_inc(&vcc->stats->rx);
25946 + atomic_inc_unchecked(&vcc->stats->rx);
25947 cell += ATM_CELL_PAYLOAD;
25948 }
25949
25950 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25951 if (iovb == NULL) {
25952 printk("nicstar%d: Out of iovec buffers.\n",
25953 card->index);
25954 - atomic_inc(&vcc->stats->rx_drop);
25955 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25956 recycle_rx_buf(card, skb);
25957 return;
25958 }
25959 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25960 small or large buffer itself. */
25961 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25962 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25963 - atomic_inc(&vcc->stats->rx_err);
25964 + atomic_inc_unchecked(&vcc->stats->rx_err);
25965 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25966 NS_MAX_IOVECS);
25967 NS_PRV_IOVCNT(iovb) = 0;
25968 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25969 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25970 card->index);
25971 which_list(card, skb);
25972 - atomic_inc(&vcc->stats->rx_err);
25973 + atomic_inc_unchecked(&vcc->stats->rx_err);
25974 recycle_rx_buf(card, skb);
25975 vc->rx_iov = NULL;
25976 recycle_iov_buf(card, iovb);
25977 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25978 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25979 card->index);
25980 which_list(card, skb);
25981 - atomic_inc(&vcc->stats->rx_err);
25982 + atomic_inc_unchecked(&vcc->stats->rx_err);
25983 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25984 NS_PRV_IOVCNT(iovb));
25985 vc->rx_iov = NULL;
25986 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25987 printk(" - PDU size mismatch.\n");
25988 else
25989 printk(".\n");
25990 - atomic_inc(&vcc->stats->rx_err);
25991 + atomic_inc_unchecked(&vcc->stats->rx_err);
25992 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25993 NS_PRV_IOVCNT(iovb));
25994 vc->rx_iov = NULL;
25995 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25996 /* skb points to a small buffer */
25997 if (!atm_charge(vcc, skb->truesize)) {
25998 push_rxbufs(card, skb);
25999 - atomic_inc(&vcc->stats->rx_drop);
26000 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26001 } else {
26002 skb_put(skb, len);
26003 dequeue_sm_buf(card, skb);
26004 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26005 ATM_SKB(skb)->vcc = vcc;
26006 __net_timestamp(skb);
26007 vcc->push(vcc, skb);
26008 - atomic_inc(&vcc->stats->rx);
26009 + atomic_inc_unchecked(&vcc->stats->rx);
26010 }
26011 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26012 struct sk_buff *sb;
26013 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26014 if (len <= NS_SMBUFSIZE) {
26015 if (!atm_charge(vcc, sb->truesize)) {
26016 push_rxbufs(card, sb);
26017 - atomic_inc(&vcc->stats->rx_drop);
26018 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26019 } else {
26020 skb_put(sb, len);
26021 dequeue_sm_buf(card, sb);
26022 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26023 ATM_SKB(sb)->vcc = vcc;
26024 __net_timestamp(sb);
26025 vcc->push(vcc, sb);
26026 - atomic_inc(&vcc->stats->rx);
26027 + atomic_inc_unchecked(&vcc->stats->rx);
26028 }
26029
26030 push_rxbufs(card, skb);
26031 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26032
26033 if (!atm_charge(vcc, skb->truesize)) {
26034 push_rxbufs(card, skb);
26035 - atomic_inc(&vcc->stats->rx_drop);
26036 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26037 } else {
26038 dequeue_lg_buf(card, skb);
26039 #ifdef NS_USE_DESTRUCTORS
26040 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26041 ATM_SKB(skb)->vcc = vcc;
26042 __net_timestamp(skb);
26043 vcc->push(vcc, skb);
26044 - atomic_inc(&vcc->stats->rx);
26045 + atomic_inc_unchecked(&vcc->stats->rx);
26046 }
26047
26048 push_rxbufs(card, sb);
26049 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26050 printk
26051 ("nicstar%d: Out of huge buffers.\n",
26052 card->index);
26053 - atomic_inc(&vcc->stats->rx_drop);
26054 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26055 recycle_iovec_rx_bufs(card,
26056 (struct iovec *)
26057 iovb->data,
26058 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26059 card->hbpool.count++;
26060 } else
26061 dev_kfree_skb_any(hb);
26062 - atomic_inc(&vcc->stats->rx_drop);
26063 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26064 } else {
26065 /* Copy the small buffer to the huge buffer */
26066 sb = (struct sk_buff *)iov->iov_base;
26067 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26068 #endif /* NS_USE_DESTRUCTORS */
26069 __net_timestamp(hb);
26070 vcc->push(vcc, hb);
26071 - atomic_inc(&vcc->stats->rx);
26072 + atomic_inc_unchecked(&vcc->stats->rx);
26073 }
26074 }
26075
26076 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26077 index 5d1d076..12fbca4 100644
26078 --- a/drivers/atm/solos-pci.c
26079 +++ b/drivers/atm/solos-pci.c
26080 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26081 }
26082 atm_charge(vcc, skb->truesize);
26083 vcc->push(vcc, skb);
26084 - atomic_inc(&vcc->stats->rx);
26085 + atomic_inc_unchecked(&vcc->stats->rx);
26086 break;
26087
26088 case PKT_STATUS:
26089 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26090 vcc = SKB_CB(oldskb)->vcc;
26091
26092 if (vcc) {
26093 - atomic_inc(&vcc->stats->tx);
26094 + atomic_inc_unchecked(&vcc->stats->tx);
26095 solos_pop(vcc, oldskb);
26096 } else
26097 dev_kfree_skb_irq(oldskb);
26098 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26099 index 90f1ccc..04c4a1e 100644
26100 --- a/drivers/atm/suni.c
26101 +++ b/drivers/atm/suni.c
26102 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26103
26104
26105 #define ADD_LIMITED(s,v) \
26106 - atomic_add((v),&stats->s); \
26107 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26108 + atomic_add_unchecked((v),&stats->s); \
26109 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26110
26111
26112 static void suni_hz(unsigned long from_timer)
26113 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26114 index 5120a96..e2572bd 100644
26115 --- a/drivers/atm/uPD98402.c
26116 +++ b/drivers/atm/uPD98402.c
26117 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26118 struct sonet_stats tmp;
26119 int error = 0;
26120
26121 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26122 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26123 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26124 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26125 if (zero && !error) {
26126 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26127
26128
26129 #define ADD_LIMITED(s,v) \
26130 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26131 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26132 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26133 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26134 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26135 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26136
26137
26138 static void stat_event(struct atm_dev *dev)
26139 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26140 if (reason & uPD98402_INT_PFM) stat_event(dev);
26141 if (reason & uPD98402_INT_PCO) {
26142 (void) GET(PCOCR); /* clear interrupt cause */
26143 - atomic_add(GET(HECCT),
26144 + atomic_add_unchecked(GET(HECCT),
26145 &PRIV(dev)->sonet_stats.uncorr_hcs);
26146 }
26147 if ((reason & uPD98402_INT_RFO) &&
26148 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26149 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26150 uPD98402_INT_LOS),PIMR); /* enable them */
26151 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26152 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26153 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26154 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26155 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26156 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26157 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26158 return 0;
26159 }
26160
26161 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26162 index d889f56..17eb71e 100644
26163 --- a/drivers/atm/zatm.c
26164 +++ b/drivers/atm/zatm.c
26165 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26166 }
26167 if (!size) {
26168 dev_kfree_skb_irq(skb);
26169 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26170 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26171 continue;
26172 }
26173 if (!atm_charge(vcc,skb->truesize)) {
26174 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26175 skb->len = size;
26176 ATM_SKB(skb)->vcc = vcc;
26177 vcc->push(vcc,skb);
26178 - atomic_inc(&vcc->stats->rx);
26179 + atomic_inc_unchecked(&vcc->stats->rx);
26180 }
26181 zout(pos & 0xffff,MTA(mbx));
26182 #if 0 /* probably a stupid idea */
26183 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26184 skb_queue_head(&zatm_vcc->backlog,skb);
26185 break;
26186 }
26187 - atomic_inc(&vcc->stats->tx);
26188 + atomic_inc_unchecked(&vcc->stats->tx);
26189 wake_up(&zatm_vcc->tx_wait);
26190 }
26191
26192 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26193 index a4760e0..51283cf 100644
26194 --- a/drivers/base/devtmpfs.c
26195 +++ b/drivers/base/devtmpfs.c
26196 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26197 if (!thread)
26198 return 0;
26199
26200 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26201 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26202 if (err)
26203 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26204 else
26205 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26206 index caf995f..6f76697 100644
26207 --- a/drivers/base/power/wakeup.c
26208 +++ b/drivers/base/power/wakeup.c
26209 @@ -30,14 +30,14 @@ bool events_check_enabled;
26210 * They need to be modified together atomically, so it's better to use one
26211 * atomic variable to hold them both.
26212 */
26213 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26214 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26215
26216 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26217 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26218
26219 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26220 {
26221 - unsigned int comb = atomic_read(&combined_event_count);
26222 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26223
26224 *cnt = (comb >> IN_PROGRESS_BITS);
26225 *inpr = comb & MAX_IN_PROGRESS;
26226 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26227 ws->last_time = ktime_get();
26228
26229 /* Increment the counter of events in progress. */
26230 - atomic_inc(&combined_event_count);
26231 + atomic_inc_unchecked(&combined_event_count);
26232 }
26233
26234 /**
26235 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26236 * Increment the counter of registered wakeup events and decrement the
26237 * couter of wakeup events in progress simultaneously.
26238 */
26239 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26240 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26241 }
26242
26243 /**
26244 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26245 index b0f553b..77b928b 100644
26246 --- a/drivers/block/cciss.c
26247 +++ b/drivers/block/cciss.c
26248 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26249 int err;
26250 u32 cp;
26251
26252 + memset(&arg64, 0, sizeof(arg64));
26253 +
26254 err = 0;
26255 err |=
26256 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26257 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26258 while (!list_empty(&h->reqQ)) {
26259 c = list_entry(h->reqQ.next, CommandList_struct, list);
26260 /* can't do anything if fifo is full */
26261 - if ((h->access.fifo_full(h))) {
26262 + if ((h->access->fifo_full(h))) {
26263 dev_warn(&h->pdev->dev, "fifo full\n");
26264 break;
26265 }
26266 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26267 h->Qdepth--;
26268
26269 /* Tell the controller execute command */
26270 - h->access.submit_command(h, c);
26271 + h->access->submit_command(h, c);
26272
26273 /* Put job onto the completed Q */
26274 addQ(&h->cmpQ, c);
26275 @@ -3443,17 +3445,17 @@ startio:
26276
26277 static inline unsigned long get_next_completion(ctlr_info_t *h)
26278 {
26279 - return h->access.command_completed(h);
26280 + return h->access->command_completed(h);
26281 }
26282
26283 static inline int interrupt_pending(ctlr_info_t *h)
26284 {
26285 - return h->access.intr_pending(h);
26286 + return h->access->intr_pending(h);
26287 }
26288
26289 static inline long interrupt_not_for_us(ctlr_info_t *h)
26290 {
26291 - return ((h->access.intr_pending(h) == 0) ||
26292 + return ((h->access->intr_pending(h) == 0) ||
26293 (h->interrupts_enabled == 0));
26294 }
26295
26296 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26297 u32 a;
26298
26299 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26300 - return h->access.command_completed(h);
26301 + return h->access->command_completed(h);
26302
26303 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26304 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26305 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26306 trans_support & CFGTBL_Trans_use_short_tags);
26307
26308 /* Change the access methods to the performant access methods */
26309 - h->access = SA5_performant_access;
26310 + h->access = &SA5_performant_access;
26311 h->transMethod = CFGTBL_Trans_Performant;
26312
26313 return;
26314 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26315 if (prod_index < 0)
26316 return -ENODEV;
26317 h->product_name = products[prod_index].product_name;
26318 - h->access = *(products[prod_index].access);
26319 + h->access = products[prod_index].access;
26320
26321 if (cciss_board_disabled(h)) {
26322 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26323 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26324 }
26325
26326 /* make sure the board interrupts are off */
26327 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26328 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26329 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26330 if (rc)
26331 goto clean2;
26332 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26333 * fake ones to scoop up any residual completions.
26334 */
26335 spin_lock_irqsave(&h->lock, flags);
26336 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26337 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26338 spin_unlock_irqrestore(&h->lock, flags);
26339 free_irq(h->intr[h->intr_mode], h);
26340 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26341 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26342 dev_info(&h->pdev->dev, "Board READY.\n");
26343 dev_info(&h->pdev->dev,
26344 "Waiting for stale completions to drain.\n");
26345 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26346 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26347 msleep(10000);
26348 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26349 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26350
26351 rc = controller_reset_failed(h->cfgtable);
26352 if (rc)
26353 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26354 cciss_scsi_setup(h);
26355
26356 /* Turn the interrupts on so we can service requests */
26357 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26358 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26359
26360 /* Get the firmware version */
26361 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26362 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26363 kfree(flush_buf);
26364 if (return_code != IO_OK)
26365 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26366 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26367 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26368 free_irq(h->intr[h->intr_mode], h);
26369 }
26370
26371 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26372 index 7fda30e..eb5dfe0 100644
26373 --- a/drivers/block/cciss.h
26374 +++ b/drivers/block/cciss.h
26375 @@ -101,7 +101,7 @@ struct ctlr_info
26376 /* information about each logical volume */
26377 drive_info_struct *drv[CISS_MAX_LUN];
26378
26379 - struct access_method access;
26380 + struct access_method *access;
26381
26382 /* queue and queue Info */
26383 struct list_head reqQ;
26384 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26385 index 9125bbe..eede5c8 100644
26386 --- a/drivers/block/cpqarray.c
26387 +++ b/drivers/block/cpqarray.c
26388 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26389 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26390 goto Enomem4;
26391 }
26392 - hba[i]->access.set_intr_mask(hba[i], 0);
26393 + hba[i]->access->set_intr_mask(hba[i], 0);
26394 if (request_irq(hba[i]->intr, do_ida_intr,
26395 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26396 {
26397 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26398 add_timer(&hba[i]->timer);
26399
26400 /* Enable IRQ now that spinlock and rate limit timer are set up */
26401 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26402 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26403
26404 for(j=0; j<NWD; j++) {
26405 struct gendisk *disk = ida_gendisk[i][j];
26406 @@ -694,7 +694,7 @@ DBGINFO(
26407 for(i=0; i<NR_PRODUCTS; i++) {
26408 if (board_id == products[i].board_id) {
26409 c->product_name = products[i].product_name;
26410 - c->access = *(products[i].access);
26411 + c->access = products[i].access;
26412 break;
26413 }
26414 }
26415 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26416 hba[ctlr]->intr = intr;
26417 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26418 hba[ctlr]->product_name = products[j].product_name;
26419 - hba[ctlr]->access = *(products[j].access);
26420 + hba[ctlr]->access = products[j].access;
26421 hba[ctlr]->ctlr = ctlr;
26422 hba[ctlr]->board_id = board_id;
26423 hba[ctlr]->pci_dev = NULL; /* not PCI */
26424 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26425
26426 while((c = h->reqQ) != NULL) {
26427 /* Can't do anything if we're busy */
26428 - if (h->access.fifo_full(h) == 0)
26429 + if (h->access->fifo_full(h) == 0)
26430 return;
26431
26432 /* Get the first entry from the request Q */
26433 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26434 h->Qdepth--;
26435
26436 /* Tell the controller to do our bidding */
26437 - h->access.submit_command(h, c);
26438 + h->access->submit_command(h, c);
26439
26440 /* Get onto the completion Q */
26441 addQ(&h->cmpQ, c);
26442 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26443 unsigned long flags;
26444 __u32 a,a1;
26445
26446 - istat = h->access.intr_pending(h);
26447 + istat = h->access->intr_pending(h);
26448 /* Is this interrupt for us? */
26449 if (istat == 0)
26450 return IRQ_NONE;
26451 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26452 */
26453 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26454 if (istat & FIFO_NOT_EMPTY) {
26455 - while((a = h->access.command_completed(h))) {
26456 + while((a = h->access->command_completed(h))) {
26457 a1 = a; a &= ~3;
26458 if ((c = h->cmpQ) == NULL)
26459 {
26460 @@ -1449,11 +1449,11 @@ static int sendcmd(
26461 /*
26462 * Disable interrupt
26463 */
26464 - info_p->access.set_intr_mask(info_p, 0);
26465 + info_p->access->set_intr_mask(info_p, 0);
26466 /* Make sure there is room in the command FIFO */
26467 /* Actually it should be completely empty at this time. */
26468 for (i = 200000; i > 0; i--) {
26469 - temp = info_p->access.fifo_full(info_p);
26470 + temp = info_p->access->fifo_full(info_p);
26471 if (temp != 0) {
26472 break;
26473 }
26474 @@ -1466,7 +1466,7 @@ DBG(
26475 /*
26476 * Send the cmd
26477 */
26478 - info_p->access.submit_command(info_p, c);
26479 + info_p->access->submit_command(info_p, c);
26480 complete = pollcomplete(ctlr);
26481
26482 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26483 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26484 * we check the new geometry. Then turn interrupts back on when
26485 * we're done.
26486 */
26487 - host->access.set_intr_mask(host, 0);
26488 + host->access->set_intr_mask(host, 0);
26489 getgeometry(ctlr);
26490 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26491 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26492
26493 for(i=0; i<NWD; i++) {
26494 struct gendisk *disk = ida_gendisk[ctlr][i];
26495 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26496 /* Wait (up to 2 seconds) for a command to complete */
26497
26498 for (i = 200000; i > 0; i--) {
26499 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26500 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26501 if (done == 0) {
26502 udelay(10); /* a short fixed delay */
26503 } else
26504 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26505 index be73e9d..7fbf140 100644
26506 --- a/drivers/block/cpqarray.h
26507 +++ b/drivers/block/cpqarray.h
26508 @@ -99,7 +99,7 @@ struct ctlr_info {
26509 drv_info_t drv[NWD];
26510 struct proc_dir_entry *proc;
26511
26512 - struct access_method access;
26513 + struct access_method *access;
26514
26515 cmdlist_t *reqQ;
26516 cmdlist_t *cmpQ;
26517 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26518 index 9cf2035..bffca95 100644
26519 --- a/drivers/block/drbd/drbd_int.h
26520 +++ b/drivers/block/drbd/drbd_int.h
26521 @@ -736,7 +736,7 @@ struct drbd_request;
26522 struct drbd_epoch {
26523 struct list_head list;
26524 unsigned int barrier_nr;
26525 - atomic_t epoch_size; /* increased on every request added. */
26526 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26527 atomic_t active; /* increased on every req. added, and dec on every finished. */
26528 unsigned long flags;
26529 };
26530 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26531 void *int_dig_in;
26532 void *int_dig_vv;
26533 wait_queue_head_t seq_wait;
26534 - atomic_t packet_seq;
26535 + atomic_unchecked_t packet_seq;
26536 unsigned int peer_seq;
26537 spinlock_t peer_seq_lock;
26538 unsigned int minor;
26539 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26540
26541 static inline void drbd_tcp_cork(struct socket *sock)
26542 {
26543 - int __user val = 1;
26544 + int val = 1;
26545 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26546 - (char __user *)&val, sizeof(val));
26547 + (char __force_user *)&val, sizeof(val));
26548 }
26549
26550 static inline void drbd_tcp_uncork(struct socket *sock)
26551 {
26552 - int __user val = 0;
26553 + int val = 0;
26554 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26555 - (char __user *)&val, sizeof(val));
26556 + (char __force_user *)&val, sizeof(val));
26557 }
26558
26559 static inline void drbd_tcp_nodelay(struct socket *sock)
26560 {
26561 - int __user val = 1;
26562 + int val = 1;
26563 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26564 - (char __user *)&val, sizeof(val));
26565 + (char __force_user *)&val, sizeof(val));
26566 }
26567
26568 static inline void drbd_tcp_quickack(struct socket *sock)
26569 {
26570 - int __user val = 2;
26571 + int val = 2;
26572 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26573 - (char __user *)&val, sizeof(val));
26574 + (char __force_user *)&val, sizeof(val));
26575 }
26576
26577 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26578 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26579 index 0358e55..bc33689 100644
26580 --- a/drivers/block/drbd/drbd_main.c
26581 +++ b/drivers/block/drbd/drbd_main.c
26582 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26583 p.sector = sector;
26584 p.block_id = block_id;
26585 p.blksize = blksize;
26586 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26587 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26588
26589 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26590 return false;
26591 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26592 p.sector = cpu_to_be64(req->sector);
26593 p.block_id = (unsigned long)req;
26594 p.seq_num = cpu_to_be32(req->seq_num =
26595 - atomic_add_return(1, &mdev->packet_seq));
26596 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26597
26598 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26599
26600 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26601 atomic_set(&mdev->unacked_cnt, 0);
26602 atomic_set(&mdev->local_cnt, 0);
26603 atomic_set(&mdev->net_cnt, 0);
26604 - atomic_set(&mdev->packet_seq, 0);
26605 + atomic_set_unchecked(&mdev->packet_seq, 0);
26606 atomic_set(&mdev->pp_in_use, 0);
26607 atomic_set(&mdev->pp_in_use_by_net, 0);
26608 atomic_set(&mdev->rs_sect_in, 0);
26609 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26610 mdev->receiver.t_state);
26611
26612 /* no need to lock it, I'm the only thread alive */
26613 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26614 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26615 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26616 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26617 mdev->al_writ_cnt =
26618 mdev->bm_writ_cnt =
26619 mdev->read_cnt =
26620 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26621 index af2a250..219c74b 100644
26622 --- a/drivers/block/drbd/drbd_nl.c
26623 +++ b/drivers/block/drbd/drbd_nl.c
26624 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26625 module_put(THIS_MODULE);
26626 }
26627
26628 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26629 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26630
26631 static unsigned short *
26632 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26633 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26634 cn_reply->id.idx = CN_IDX_DRBD;
26635 cn_reply->id.val = CN_VAL_DRBD;
26636
26637 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26638 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26639 cn_reply->ack = 0; /* not used here. */
26640 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26641 (int)((char *)tl - (char *)reply->tag_list);
26642 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26643 cn_reply->id.idx = CN_IDX_DRBD;
26644 cn_reply->id.val = CN_VAL_DRBD;
26645
26646 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26647 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26648 cn_reply->ack = 0; /* not used here. */
26649 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26650 (int)((char *)tl - (char *)reply->tag_list);
26651 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26652 cn_reply->id.idx = CN_IDX_DRBD;
26653 cn_reply->id.val = CN_VAL_DRBD;
26654
26655 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26656 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26657 cn_reply->ack = 0; // not used here.
26658 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26659 (int)((char*)tl - (char*)reply->tag_list);
26660 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26661 cn_reply->id.idx = CN_IDX_DRBD;
26662 cn_reply->id.val = CN_VAL_DRBD;
26663
26664 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26665 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26666 cn_reply->ack = 0; /* not used here. */
26667 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26668 (int)((char *)tl - (char *)reply->tag_list);
26669 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26670 index 43beaca..4a5b1dd 100644
26671 --- a/drivers/block/drbd/drbd_receiver.c
26672 +++ b/drivers/block/drbd/drbd_receiver.c
26673 @@ -894,7 +894,7 @@ retry:
26674 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26675 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26676
26677 - atomic_set(&mdev->packet_seq, 0);
26678 + atomic_set_unchecked(&mdev->packet_seq, 0);
26679 mdev->peer_seq = 0;
26680
26681 drbd_thread_start(&mdev->asender);
26682 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26683 do {
26684 next_epoch = NULL;
26685
26686 - epoch_size = atomic_read(&epoch->epoch_size);
26687 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26688
26689 switch (ev & ~EV_CLEANUP) {
26690 case EV_PUT:
26691 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26692 rv = FE_DESTROYED;
26693 } else {
26694 epoch->flags = 0;
26695 - atomic_set(&epoch->epoch_size, 0);
26696 + atomic_set_unchecked(&epoch->epoch_size, 0);
26697 /* atomic_set(&epoch->active, 0); is already zero */
26698 if (rv == FE_STILL_LIVE)
26699 rv = FE_RECYCLED;
26700 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26701 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26702 drbd_flush(mdev);
26703
26704 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26705 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26706 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26707 if (epoch)
26708 break;
26709 }
26710
26711 epoch = mdev->current_epoch;
26712 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26713 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26714
26715 D_ASSERT(atomic_read(&epoch->active) == 0);
26716 D_ASSERT(epoch->flags == 0);
26717 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26718 }
26719
26720 epoch->flags = 0;
26721 - atomic_set(&epoch->epoch_size, 0);
26722 + atomic_set_unchecked(&epoch->epoch_size, 0);
26723 atomic_set(&epoch->active, 0);
26724
26725 spin_lock(&mdev->epoch_lock);
26726 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26727 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26728 list_add(&epoch->list, &mdev->current_epoch->list);
26729 mdev->current_epoch = epoch;
26730 mdev->epochs++;
26731 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26732 spin_unlock(&mdev->peer_seq_lock);
26733
26734 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26735 - atomic_inc(&mdev->current_epoch->epoch_size);
26736 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26737 return drbd_drain_block(mdev, data_size);
26738 }
26739
26740 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26741
26742 spin_lock(&mdev->epoch_lock);
26743 e->epoch = mdev->current_epoch;
26744 - atomic_inc(&e->epoch->epoch_size);
26745 + atomic_inc_unchecked(&e->epoch->epoch_size);
26746 atomic_inc(&e->epoch->active);
26747 spin_unlock(&mdev->epoch_lock);
26748
26749 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26750 D_ASSERT(list_empty(&mdev->done_ee));
26751
26752 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26753 - atomic_set(&mdev->current_epoch->epoch_size, 0);
26754 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26755 D_ASSERT(list_empty(&mdev->current_epoch->list));
26756 }
26757
26758 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26759 index 1e888c9..05cf1b0 100644
26760 --- a/drivers/block/loop.c
26761 +++ b/drivers/block/loop.c
26762 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26763 mm_segment_t old_fs = get_fs();
26764
26765 set_fs(get_ds());
26766 - bw = file->f_op->write(file, buf, len, &pos);
26767 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26768 set_fs(old_fs);
26769 if (likely(bw == len))
26770 return 0;
26771 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26772 index 4364303..9adf4ee 100644
26773 --- a/drivers/char/Kconfig
26774 +++ b/drivers/char/Kconfig
26775 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26776
26777 config DEVKMEM
26778 bool "/dev/kmem virtual device support"
26779 - default y
26780 + default n
26781 + depends on !GRKERNSEC_KMEM
26782 help
26783 Say Y here if you want to support the /dev/kmem device. The
26784 /dev/kmem device is rarely used, but can be used for certain
26785 @@ -596,6 +597,7 @@ config DEVPORT
26786 bool
26787 depends on !M68K
26788 depends on ISA || PCI
26789 + depends on !GRKERNSEC_KMEM
26790 default y
26791
26792 source "drivers/s390/char/Kconfig"
26793 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26794 index 2e04433..22afc64 100644
26795 --- a/drivers/char/agp/frontend.c
26796 +++ b/drivers/char/agp/frontend.c
26797 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26798 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26799 return -EFAULT;
26800
26801 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26802 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26803 return -EFAULT;
26804
26805 client = agp_find_client_by_pid(reserve.pid);
26806 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26807 index 095ab90..afad0a4 100644
26808 --- a/drivers/char/briq_panel.c
26809 +++ b/drivers/char/briq_panel.c
26810 @@ -9,6 +9,7 @@
26811 #include <linux/types.h>
26812 #include <linux/errno.h>
26813 #include <linux/tty.h>
26814 +#include <linux/mutex.h>
26815 #include <linux/timer.h>
26816 #include <linux/kernel.h>
26817 #include <linux/wait.h>
26818 @@ -34,6 +35,7 @@ static int vfd_is_open;
26819 static unsigned char vfd[40];
26820 static int vfd_cursor;
26821 static unsigned char ledpb, led;
26822 +static DEFINE_MUTEX(vfd_mutex);
26823
26824 static void update_vfd(void)
26825 {
26826 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26827 if (!vfd_is_open)
26828 return -EBUSY;
26829
26830 + mutex_lock(&vfd_mutex);
26831 for (;;) {
26832 char c;
26833 if (!indx)
26834 break;
26835 - if (get_user(c, buf))
26836 + if (get_user(c, buf)) {
26837 + mutex_unlock(&vfd_mutex);
26838 return -EFAULT;
26839 + }
26840 if (esc) {
26841 set_led(c);
26842 esc = 0;
26843 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26844 buf++;
26845 }
26846 update_vfd();
26847 + mutex_unlock(&vfd_mutex);
26848
26849 return len;
26850 }
26851 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26852 index f773a9d..65cd683 100644
26853 --- a/drivers/char/genrtc.c
26854 +++ b/drivers/char/genrtc.c
26855 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26856 switch (cmd) {
26857
26858 case RTC_PLL_GET:
26859 + memset(&pll, 0, sizeof(pll));
26860 if (get_rtc_pll(&pll))
26861 return -EINVAL;
26862 else
26863 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26864 index 0833896..cccce52 100644
26865 --- a/drivers/char/hpet.c
26866 +++ b/drivers/char/hpet.c
26867 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26868 }
26869
26870 static int
26871 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26872 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26873 struct hpet_info *info)
26874 {
26875 struct hpet_timer __iomem *timer;
26876 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26877 index 58c0e63..46c16bf 100644
26878 --- a/drivers/char/ipmi/ipmi_msghandler.c
26879 +++ b/drivers/char/ipmi/ipmi_msghandler.c
26880 @@ -415,7 +415,7 @@ struct ipmi_smi {
26881 struct proc_dir_entry *proc_dir;
26882 char proc_dir_name[10];
26883
26884 - atomic_t stats[IPMI_NUM_STATS];
26885 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26886
26887 /*
26888 * run_to_completion duplicate of smb_info, smi_info
26889 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26890
26891
26892 #define ipmi_inc_stat(intf, stat) \
26893 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26894 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26895 #define ipmi_get_stat(intf, stat) \
26896 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26897 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26898
26899 static int is_lan_addr(struct ipmi_addr *addr)
26900 {
26901 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26902 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26903 init_waitqueue_head(&intf->waitq);
26904 for (i = 0; i < IPMI_NUM_STATS; i++)
26905 - atomic_set(&intf->stats[i], 0);
26906 + atomic_set_unchecked(&intf->stats[i], 0);
26907
26908 intf->proc_dir = NULL;
26909
26910 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26911 index 9397ab4..d01bee1 100644
26912 --- a/drivers/char/ipmi/ipmi_si_intf.c
26913 +++ b/drivers/char/ipmi/ipmi_si_intf.c
26914 @@ -277,7 +277,7 @@ struct smi_info {
26915 unsigned char slave_addr;
26916
26917 /* Counters and things for the proc filesystem. */
26918 - atomic_t stats[SI_NUM_STATS];
26919 + atomic_unchecked_t stats[SI_NUM_STATS];
26920
26921 struct task_struct *thread;
26922
26923 @@ -286,9 +286,9 @@ struct smi_info {
26924 };
26925
26926 #define smi_inc_stat(smi, stat) \
26927 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26928 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26929 #define smi_get_stat(smi, stat) \
26930 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26931 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26932
26933 #define SI_MAX_PARMS 4
26934
26935 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26936 atomic_set(&new_smi->req_events, 0);
26937 new_smi->run_to_completion = 0;
26938 for (i = 0; i < SI_NUM_STATS; i++)
26939 - atomic_set(&new_smi->stats[i], 0);
26940 + atomic_set_unchecked(&new_smi->stats[i], 0);
26941
26942 new_smi->interrupt_disabled = 1;
26943 atomic_set(&new_smi->stop_operation, 0);
26944 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26945 index 1aeaaba..e018570 100644
26946 --- a/drivers/char/mbcs.c
26947 +++ b/drivers/char/mbcs.c
26948 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26949 return 0;
26950 }
26951
26952 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26953 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26954 {
26955 .part_num = MBCS_PART_NUM,
26956 .mfg_num = MBCS_MFG_NUM,
26957 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26958 index 1451790..f705c30 100644
26959 --- a/drivers/char/mem.c
26960 +++ b/drivers/char/mem.c
26961 @@ -18,6 +18,7 @@
26962 #include <linux/raw.h>
26963 #include <linux/tty.h>
26964 #include <linux/capability.h>
26965 +#include <linux/security.h>
26966 #include <linux/ptrace.h>
26967 #include <linux/device.h>
26968 #include <linux/highmem.h>
26969 @@ -35,6 +36,10 @@
26970 # include <linux/efi.h>
26971 #endif
26972
26973 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26974 +extern const struct file_operations grsec_fops;
26975 +#endif
26976 +
26977 static inline unsigned long size_inside_page(unsigned long start,
26978 unsigned long size)
26979 {
26980 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26981
26982 while (cursor < to) {
26983 if (!devmem_is_allowed(pfn)) {
26984 +#ifdef CONFIG_GRKERNSEC_KMEM
26985 + gr_handle_mem_readwrite(from, to);
26986 +#else
26987 printk(KERN_INFO
26988 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26989 current->comm, from, to);
26990 +#endif
26991 return 0;
26992 }
26993 cursor += PAGE_SIZE;
26994 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26995 }
26996 return 1;
26997 }
26998 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26999 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27000 +{
27001 + return 0;
27002 +}
27003 #else
27004 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27005 {
27006 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27007
27008 while (count > 0) {
27009 unsigned long remaining;
27010 + char *temp;
27011
27012 sz = size_inside_page(p, count);
27013
27014 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27015 if (!ptr)
27016 return -EFAULT;
27017
27018 - remaining = copy_to_user(buf, ptr, sz);
27019 +#ifdef CONFIG_PAX_USERCOPY
27020 + temp = kmalloc(sz, GFP_KERNEL);
27021 + if (!temp) {
27022 + unxlate_dev_mem_ptr(p, ptr);
27023 + return -ENOMEM;
27024 + }
27025 + memcpy(temp, ptr, sz);
27026 +#else
27027 + temp = ptr;
27028 +#endif
27029 +
27030 + remaining = copy_to_user(buf, temp, sz);
27031 +
27032 +#ifdef CONFIG_PAX_USERCOPY
27033 + kfree(temp);
27034 +#endif
27035 +
27036 unxlate_dev_mem_ptr(p, ptr);
27037 if (remaining)
27038 return -EFAULT;
27039 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27040 size_t count, loff_t *ppos)
27041 {
27042 unsigned long p = *ppos;
27043 - ssize_t low_count, read, sz;
27044 + ssize_t low_count, read, sz, err = 0;
27045 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27046 - int err = 0;
27047
27048 read = 0;
27049 if (p < (unsigned long) high_memory) {
27050 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27051 }
27052 #endif
27053 while (low_count > 0) {
27054 + char *temp;
27055 +
27056 sz = size_inside_page(p, low_count);
27057
27058 /*
27059 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27060 */
27061 kbuf = xlate_dev_kmem_ptr((char *)p);
27062
27063 - if (copy_to_user(buf, kbuf, sz))
27064 +#ifdef CONFIG_PAX_USERCOPY
27065 + temp = kmalloc(sz, GFP_KERNEL);
27066 + if (!temp)
27067 + return -ENOMEM;
27068 + memcpy(temp, kbuf, sz);
27069 +#else
27070 + temp = kbuf;
27071 +#endif
27072 +
27073 + err = copy_to_user(buf, temp, sz);
27074 +
27075 +#ifdef CONFIG_PAX_USERCOPY
27076 + kfree(temp);
27077 +#endif
27078 +
27079 + if (err)
27080 return -EFAULT;
27081 buf += sz;
27082 p += sz;
27083 @@ -867,6 +914,9 @@ static const struct memdev {
27084 #ifdef CONFIG_CRASH_DUMP
27085 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27086 #endif
27087 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27088 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27089 +#endif
27090 };
27091
27092 static int memory_open(struct inode *inode, struct file *filp)
27093 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27094 index da3cfee..a5a6606 100644
27095 --- a/drivers/char/nvram.c
27096 +++ b/drivers/char/nvram.c
27097 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27098
27099 spin_unlock_irq(&rtc_lock);
27100
27101 - if (copy_to_user(buf, contents, tmp - contents))
27102 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27103 return -EFAULT;
27104
27105 *ppos = i;
27106 diff --git a/drivers/char/random.c b/drivers/char/random.c
27107 index 6035ab8..bdfe4fd 100644
27108 --- a/drivers/char/random.c
27109 +++ b/drivers/char/random.c
27110 @@ -261,8 +261,13 @@
27111 /*
27112 * Configuration information
27113 */
27114 +#ifdef CONFIG_GRKERNSEC_RANDNET
27115 +#define INPUT_POOL_WORDS 512
27116 +#define OUTPUT_POOL_WORDS 128
27117 +#else
27118 #define INPUT_POOL_WORDS 128
27119 #define OUTPUT_POOL_WORDS 32
27120 +#endif
27121 #define SEC_XFER_SIZE 512
27122 #define EXTRACT_SIZE 10
27123
27124 @@ -300,10 +305,17 @@ static struct poolinfo {
27125 int poolwords;
27126 int tap1, tap2, tap3, tap4, tap5;
27127 } poolinfo_table[] = {
27128 +#ifdef CONFIG_GRKERNSEC_RANDNET
27129 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27130 + { 512, 411, 308, 208, 104, 1 },
27131 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27132 + { 128, 103, 76, 51, 25, 1 },
27133 +#else
27134 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27135 { 128, 103, 76, 51, 25, 1 },
27136 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27137 { 32, 26, 20, 14, 7, 1 },
27138 +#endif
27139 #if 0
27140 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27141 { 2048, 1638, 1231, 819, 411, 1 },
27142 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27143
27144 extract_buf(r, tmp);
27145 i = min_t(int, nbytes, EXTRACT_SIZE);
27146 - if (copy_to_user(buf, tmp, i)) {
27147 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27148 ret = -EFAULT;
27149 break;
27150 }
27151 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27152 #include <linux/sysctl.h>
27153
27154 static int min_read_thresh = 8, min_write_thresh;
27155 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27156 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27157 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27158 static char sysctl_bootid[16];
27159
27160 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27161 index 1ee8ce7..b778bef 100644
27162 --- a/drivers/char/sonypi.c
27163 +++ b/drivers/char/sonypi.c
27164 @@ -55,6 +55,7 @@
27165 #include <asm/uaccess.h>
27166 #include <asm/io.h>
27167 #include <asm/system.h>
27168 +#include <asm/local.h>
27169
27170 #include <linux/sonypi.h>
27171
27172 @@ -491,7 +492,7 @@ static struct sonypi_device {
27173 spinlock_t fifo_lock;
27174 wait_queue_head_t fifo_proc_list;
27175 struct fasync_struct *fifo_async;
27176 - int open_count;
27177 + local_t open_count;
27178 int model;
27179 struct input_dev *input_jog_dev;
27180 struct input_dev *input_key_dev;
27181 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27182 static int sonypi_misc_release(struct inode *inode, struct file *file)
27183 {
27184 mutex_lock(&sonypi_device.lock);
27185 - sonypi_device.open_count--;
27186 + local_dec(&sonypi_device.open_count);
27187 mutex_unlock(&sonypi_device.lock);
27188 return 0;
27189 }
27190 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27191 {
27192 mutex_lock(&sonypi_device.lock);
27193 /* Flush input queue on first open */
27194 - if (!sonypi_device.open_count)
27195 + if (!local_read(&sonypi_device.open_count))
27196 kfifo_reset(&sonypi_device.fifo);
27197 - sonypi_device.open_count++;
27198 + local_inc(&sonypi_device.open_count);
27199 mutex_unlock(&sonypi_device.lock);
27200
27201 return 0;
27202 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27203 index 361a1df..2471eee 100644
27204 --- a/drivers/char/tpm/tpm.c
27205 +++ b/drivers/char/tpm/tpm.c
27206 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27207 chip->vendor.req_complete_val)
27208 goto out_recv;
27209
27210 - if ((status == chip->vendor.req_canceled)) {
27211 + if (status == chip->vendor.req_canceled) {
27212 dev_err(chip->dev, "Operation Canceled\n");
27213 rc = -ECANCELED;
27214 goto out;
27215 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27216 index 0636520..169c1d0 100644
27217 --- a/drivers/char/tpm/tpm_bios.c
27218 +++ b/drivers/char/tpm/tpm_bios.c
27219 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27220 event = addr;
27221
27222 if ((event->event_type == 0 && event->event_size == 0) ||
27223 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27224 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27225 return NULL;
27226
27227 return addr;
27228 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27229 return NULL;
27230
27231 if ((event->event_type == 0 && event->event_size == 0) ||
27232 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27233 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27234 return NULL;
27235
27236 (*pos)++;
27237 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27238 int i;
27239
27240 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27241 - seq_putc(m, data[i]);
27242 + if (!seq_putc(m, data[i]))
27243 + return -EFAULT;
27244
27245 return 0;
27246 }
27247 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27248 log->bios_event_log_end = log->bios_event_log + len;
27249
27250 virt = acpi_os_map_memory(start, len);
27251 + if (!virt) {
27252 + kfree(log->bios_event_log);
27253 + log->bios_event_log = NULL;
27254 + return -EFAULT;
27255 + }
27256
27257 - memcpy(log->bios_event_log, virt, len);
27258 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27259
27260 acpi_os_unmap_memory(virt, len);
27261 return 0;
27262 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27263 index 8e3c46d..c139b99 100644
27264 --- a/drivers/char/virtio_console.c
27265 +++ b/drivers/char/virtio_console.c
27266 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27267 if (to_user) {
27268 ssize_t ret;
27269
27270 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27271 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27272 if (ret)
27273 return -EFAULT;
27274 } else {
27275 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27276 if (!port_has_data(port) && !port->host_connected)
27277 return 0;
27278
27279 - return fill_readbuf(port, ubuf, count, true);
27280 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27281 }
27282
27283 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27284 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27285 index eb1d864..39ee5a7 100644
27286 --- a/drivers/dma/dmatest.c
27287 +++ b/drivers/dma/dmatest.c
27288 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27289 }
27290 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27291 cnt = dmatest_add_threads(dtc, DMA_PQ);
27292 - thread_count += cnt > 0 ?: 0;
27293 + thread_count += cnt > 0 ? cnt : 0;
27294 }
27295
27296 pr_info("dmatest: Started %u threads using %s\n",
27297 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27298 index c9eee6d..f9d5280 100644
27299 --- a/drivers/edac/amd64_edac.c
27300 +++ b/drivers/edac/amd64_edac.c
27301 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27302 * PCI core identifies what devices are on a system during boot, and then
27303 * inquiry this table to see if this driver is for a given device found.
27304 */
27305 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27306 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27307 {
27308 .vendor = PCI_VENDOR_ID_AMD,
27309 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27310 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27311 index e47e73b..348e0bd 100644
27312 --- a/drivers/edac/amd76x_edac.c
27313 +++ b/drivers/edac/amd76x_edac.c
27314 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27315 edac_mc_free(mci);
27316 }
27317
27318 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27319 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27320 {
27321 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27322 AMD762},
27323 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27324 index 1af531a..3a8ff27 100644
27325 --- a/drivers/edac/e752x_edac.c
27326 +++ b/drivers/edac/e752x_edac.c
27327 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27328 edac_mc_free(mci);
27329 }
27330
27331 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27332 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27333 {
27334 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27335 E7520},
27336 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27337 index 6ffb6d2..383d8d7 100644
27338 --- a/drivers/edac/e7xxx_edac.c
27339 +++ b/drivers/edac/e7xxx_edac.c
27340 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27341 edac_mc_free(mci);
27342 }
27343
27344 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27345 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27346 {
27347 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27348 E7205},
27349 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27350 index 495198a..ac08c85 100644
27351 --- a/drivers/edac/edac_pci_sysfs.c
27352 +++ b/drivers/edac/edac_pci_sysfs.c
27353 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27354 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27355 static int edac_pci_poll_msec = 1000; /* one second workq period */
27356
27357 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27358 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27359 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27360 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27361
27362 static struct kobject *edac_pci_top_main_kobj;
27363 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27364 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27365 edac_printk(KERN_CRIT, EDAC_PCI,
27366 "Signaled System Error on %s\n",
27367 pci_name(dev));
27368 - atomic_inc(&pci_nonparity_count);
27369 + atomic_inc_unchecked(&pci_nonparity_count);
27370 }
27371
27372 if (status & (PCI_STATUS_PARITY)) {
27373 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27374 "Master Data Parity Error on %s\n",
27375 pci_name(dev));
27376
27377 - atomic_inc(&pci_parity_count);
27378 + atomic_inc_unchecked(&pci_parity_count);
27379 }
27380
27381 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27382 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27383 "Detected Parity Error on %s\n",
27384 pci_name(dev));
27385
27386 - atomic_inc(&pci_parity_count);
27387 + atomic_inc_unchecked(&pci_parity_count);
27388 }
27389 }
27390
27391 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27392 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27393 "Signaled System Error on %s\n",
27394 pci_name(dev));
27395 - atomic_inc(&pci_nonparity_count);
27396 + atomic_inc_unchecked(&pci_nonparity_count);
27397 }
27398
27399 if (status & (PCI_STATUS_PARITY)) {
27400 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27401 "Master Data Parity Error on "
27402 "%s\n", pci_name(dev));
27403
27404 - atomic_inc(&pci_parity_count);
27405 + atomic_inc_unchecked(&pci_parity_count);
27406 }
27407
27408 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27409 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27410 "Detected Parity Error on %s\n",
27411 pci_name(dev));
27412
27413 - atomic_inc(&pci_parity_count);
27414 + atomic_inc_unchecked(&pci_parity_count);
27415 }
27416 }
27417 }
27418 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27419 if (!check_pci_errors)
27420 return;
27421
27422 - before_count = atomic_read(&pci_parity_count);
27423 + before_count = atomic_read_unchecked(&pci_parity_count);
27424
27425 /* scan all PCI devices looking for a Parity Error on devices and
27426 * bridges.
27427 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27428 /* Only if operator has selected panic on PCI Error */
27429 if (edac_pci_get_panic_on_pe()) {
27430 /* If the count is different 'after' from 'before' */
27431 - if (before_count != atomic_read(&pci_parity_count))
27432 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27433 panic("EDAC: PCI Parity Error");
27434 }
27435 }
27436 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27437 index c0510b3..6e2a954 100644
27438 --- a/drivers/edac/i3000_edac.c
27439 +++ b/drivers/edac/i3000_edac.c
27440 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27441 edac_mc_free(mci);
27442 }
27443
27444 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27445 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27446 {
27447 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27448 I3000},
27449 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27450 index aa08497..7e6822a 100644
27451 --- a/drivers/edac/i3200_edac.c
27452 +++ b/drivers/edac/i3200_edac.c
27453 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27454 edac_mc_free(mci);
27455 }
27456
27457 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27458 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27459 {
27460 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27461 I3200},
27462 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27463 index 4dc3ac2..67d05a6 100644
27464 --- a/drivers/edac/i5000_edac.c
27465 +++ b/drivers/edac/i5000_edac.c
27466 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27467 *
27468 * The "E500P" device is the first device supported.
27469 */
27470 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27471 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27472 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27473 .driver_data = I5000P},
27474
27475 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27476 index bcbdeec..9886d16 100644
27477 --- a/drivers/edac/i5100_edac.c
27478 +++ b/drivers/edac/i5100_edac.c
27479 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27480 edac_mc_free(mci);
27481 }
27482
27483 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27484 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27485 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27486 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27487 { 0, }
27488 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27489 index 74d6ec34..baff517 100644
27490 --- a/drivers/edac/i5400_edac.c
27491 +++ b/drivers/edac/i5400_edac.c
27492 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27493 *
27494 * The "E500P" device is the first device supported.
27495 */
27496 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27497 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27498 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27499 {0,} /* 0 terminated list. */
27500 };
27501 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27502 index 6104dba..e7ea8e1 100644
27503 --- a/drivers/edac/i7300_edac.c
27504 +++ b/drivers/edac/i7300_edac.c
27505 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27506 *
27507 * Has only 8086:360c PCI ID
27508 */
27509 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27510 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27511 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27512 {0,} /* 0 terminated list. */
27513 };
27514 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27515 index 70ad892..178943c 100644
27516 --- a/drivers/edac/i7core_edac.c
27517 +++ b/drivers/edac/i7core_edac.c
27518 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27519 /*
27520 * pci_device_id table for which devices we are looking for
27521 */
27522 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27523 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27524 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27525 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27526 {0,} /* 0 terminated list. */
27527 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27528 index 4329d39..f3022ef 100644
27529 --- a/drivers/edac/i82443bxgx_edac.c
27530 +++ b/drivers/edac/i82443bxgx_edac.c
27531 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27532
27533 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27534
27535 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27536 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27537 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27538 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27539 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27540 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27541 index 931a057..fd28340 100644
27542 --- a/drivers/edac/i82860_edac.c
27543 +++ b/drivers/edac/i82860_edac.c
27544 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27545 edac_mc_free(mci);
27546 }
27547
27548 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27549 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27550 {
27551 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27552 I82860},
27553 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27554 index 33864c6..01edc61 100644
27555 --- a/drivers/edac/i82875p_edac.c
27556 +++ b/drivers/edac/i82875p_edac.c
27557 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27558 edac_mc_free(mci);
27559 }
27560
27561 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27562 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27563 {
27564 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27565 I82875P},
27566 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27567 index a5da732..983363b 100644
27568 --- a/drivers/edac/i82975x_edac.c
27569 +++ b/drivers/edac/i82975x_edac.c
27570 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27571 edac_mc_free(mci);
27572 }
27573
27574 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27575 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27576 {
27577 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27578 I82975X
27579 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27580 index 0106747..0b40417 100644
27581 --- a/drivers/edac/mce_amd.h
27582 +++ b/drivers/edac/mce_amd.h
27583 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27584 bool (*dc_mce)(u16, u8);
27585 bool (*ic_mce)(u16, u8);
27586 bool (*nb_mce)(u16, u8);
27587 -};
27588 +} __no_const;
27589
27590 void amd_report_gart_errors(bool);
27591 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27592 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27593 index b153674..ad2ba9b 100644
27594 --- a/drivers/edac/r82600_edac.c
27595 +++ b/drivers/edac/r82600_edac.c
27596 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27597 edac_mc_free(mci);
27598 }
27599
27600 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27601 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27602 {
27603 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27604 },
27605 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27606 index 7a402bf..af0b211 100644
27607 --- a/drivers/edac/sb_edac.c
27608 +++ b/drivers/edac/sb_edac.c
27609 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27610 /*
27611 * pci_device_id table for which devices we are looking for
27612 */
27613 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27614 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27615 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27616 {0,} /* 0 terminated list. */
27617 };
27618 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27619 index b6f47de..c5acf3a 100644
27620 --- a/drivers/edac/x38_edac.c
27621 +++ b/drivers/edac/x38_edac.c
27622 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27623 edac_mc_free(mci);
27624 }
27625
27626 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27627 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27628 {
27629 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27630 X38},
27631 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27632 index 85661b0..c784559a 100644
27633 --- a/drivers/firewire/core-card.c
27634 +++ b/drivers/firewire/core-card.c
27635 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27636
27637 void fw_core_remove_card(struct fw_card *card)
27638 {
27639 - struct fw_card_driver dummy_driver = dummy_driver_template;
27640 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27641
27642 card->driver->update_phy_reg(card, 4,
27643 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27644 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27645 index 4799393..37bd3ab 100644
27646 --- a/drivers/firewire/core-cdev.c
27647 +++ b/drivers/firewire/core-cdev.c
27648 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27649 int ret;
27650
27651 if ((request->channels == 0 && request->bandwidth == 0) ||
27652 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27653 - request->bandwidth < 0)
27654 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27655 return -EINVAL;
27656
27657 r = kmalloc(sizeof(*r), GFP_KERNEL);
27658 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27659 index 855ab3f..11f4bbd 100644
27660 --- a/drivers/firewire/core-transaction.c
27661 +++ b/drivers/firewire/core-transaction.c
27662 @@ -37,6 +37,7 @@
27663 #include <linux/timer.h>
27664 #include <linux/types.h>
27665 #include <linux/workqueue.h>
27666 +#include <linux/sched.h>
27667
27668 #include <asm/byteorder.h>
27669
27670 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27671 index b45be57..5fad18b 100644
27672 --- a/drivers/firewire/core.h
27673 +++ b/drivers/firewire/core.h
27674 @@ -101,6 +101,7 @@ struct fw_card_driver {
27675
27676 int (*stop_iso)(struct fw_iso_context *ctx);
27677 };
27678 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27679
27680 void fw_card_initialize(struct fw_card *card,
27681 const struct fw_card_driver *driver, struct device *device);
27682 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27683 index 153980b..4b4d046 100644
27684 --- a/drivers/firmware/dmi_scan.c
27685 +++ b/drivers/firmware/dmi_scan.c
27686 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27687 }
27688 }
27689 else {
27690 - /*
27691 - * no iounmap() for that ioremap(); it would be a no-op, but
27692 - * it's so early in setup that sucker gets confused into doing
27693 - * what it shouldn't if we actually call it.
27694 - */
27695 p = dmi_ioremap(0xF0000, 0x10000);
27696 if (p == NULL)
27697 goto error;
27698 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27699 if (buf == NULL)
27700 return -1;
27701
27702 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27703 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27704
27705 iounmap(buf);
27706 return 0;
27707 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27708 index 98723cb..10ca85b 100644
27709 --- a/drivers/gpio/gpio-vr41xx.c
27710 +++ b/drivers/gpio/gpio-vr41xx.c
27711 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27712 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27713 maskl, pendl, maskh, pendh);
27714
27715 - atomic_inc(&irq_err_count);
27716 + atomic_inc_unchecked(&irq_err_count);
27717
27718 return -EINVAL;
27719 }
27720 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27721 index 8323fc3..5c1d755 100644
27722 --- a/drivers/gpu/drm/drm_crtc.c
27723 +++ b/drivers/gpu/drm/drm_crtc.c
27724 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27725 */
27726 if ((out_resp->count_modes >= mode_count) && mode_count) {
27727 copied = 0;
27728 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27729 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27730 list_for_each_entry(mode, &connector->modes, head) {
27731 drm_crtc_convert_to_umode(&u_mode, mode);
27732 if (copy_to_user(mode_ptr + copied,
27733 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27734
27735 if ((out_resp->count_props >= props_count) && props_count) {
27736 copied = 0;
27737 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27738 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27739 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27740 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27741 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27742 if (connector->property_ids[i] != 0) {
27743 if (put_user(connector->property_ids[i],
27744 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27745
27746 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27747 copied = 0;
27748 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27749 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27750 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27751 if (connector->encoder_ids[i] != 0) {
27752 if (put_user(connector->encoder_ids[i],
27753 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27754 }
27755
27756 for (i = 0; i < crtc_req->count_connectors; i++) {
27757 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27758 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27759 if (get_user(out_id, &set_connectors_ptr[i])) {
27760 ret = -EFAULT;
27761 goto out;
27762 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27763 fb = obj_to_fb(obj);
27764
27765 num_clips = r->num_clips;
27766 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27767 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27768
27769 if (!num_clips != !clips_ptr) {
27770 ret = -EINVAL;
27771 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27772 out_resp->flags = property->flags;
27773
27774 if ((out_resp->count_values >= value_count) && value_count) {
27775 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27776 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27777 for (i = 0; i < value_count; i++) {
27778 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27779 ret = -EFAULT;
27780 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27781 if (property->flags & DRM_MODE_PROP_ENUM) {
27782 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27783 copied = 0;
27784 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27785 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27786 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27787
27788 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27789 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27790 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27791 copied = 0;
27792 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27793 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27794 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27795
27796 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27797 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27798 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27799 struct drm_mode_get_blob *out_resp = data;
27800 struct drm_property_blob *blob;
27801 int ret = 0;
27802 - void *blob_ptr;
27803 + void __user *blob_ptr;
27804
27805 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27806 return -EINVAL;
27807 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27808 blob = obj_to_blob(obj);
27809
27810 if (out_resp->length == blob->length) {
27811 - blob_ptr = (void *)(unsigned long)out_resp->data;
27812 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
27813 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27814 ret = -EFAULT;
27815 goto done;
27816 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27817 index d2619d7..bd6bd00 100644
27818 --- a/drivers/gpu/drm/drm_crtc_helper.c
27819 +++ b/drivers/gpu/drm/drm_crtc_helper.c
27820 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27821 struct drm_crtc *tmp;
27822 int crtc_mask = 1;
27823
27824 - WARN(!crtc, "checking null crtc?\n");
27825 + BUG_ON(!crtc);
27826
27827 dev = crtc->dev;
27828
27829 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27830 index 40c187c..5746164 100644
27831 --- a/drivers/gpu/drm/drm_drv.c
27832 +++ b/drivers/gpu/drm/drm_drv.c
27833 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27834 /**
27835 * Copy and IOCTL return string to user space
27836 */
27837 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27838 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27839 {
27840 int len;
27841
27842 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27843
27844 dev = file_priv->minor->dev;
27845 atomic_inc(&dev->ioctl_count);
27846 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27847 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27848 ++file_priv->ioctl_count;
27849
27850 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27851 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27852 index 828bf65..cdaa0e9 100644
27853 --- a/drivers/gpu/drm/drm_fops.c
27854 +++ b/drivers/gpu/drm/drm_fops.c
27855 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27856 }
27857
27858 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27859 - atomic_set(&dev->counts[i], 0);
27860 + atomic_set_unchecked(&dev->counts[i], 0);
27861
27862 dev->sigdata.lock = NULL;
27863
27864 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27865
27866 retcode = drm_open_helper(inode, filp, dev);
27867 if (!retcode) {
27868 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27869 - if (!dev->open_count++)
27870 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27871 + if (local_inc_return(&dev->open_count) == 1)
27872 retcode = drm_setup(dev);
27873 }
27874 if (!retcode) {
27875 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27876
27877 mutex_lock(&drm_global_mutex);
27878
27879 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27880 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27881
27882 if (dev->driver->preclose)
27883 dev->driver->preclose(dev, file_priv);
27884 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27885 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27886 task_pid_nr(current),
27887 (long)old_encode_dev(file_priv->minor->device),
27888 - dev->open_count);
27889 + local_read(&dev->open_count));
27890
27891 /* Release any auth tokens that might point to this file_priv,
27892 (do that under the drm_global_mutex) */
27893 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
27894 * End inline drm_release
27895 */
27896
27897 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27898 - if (!--dev->open_count) {
27899 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27900 + if (local_dec_and_test(&dev->open_count)) {
27901 if (atomic_read(&dev->ioctl_count)) {
27902 DRM_ERROR("Device busy: %d\n",
27903 atomic_read(&dev->ioctl_count));
27904 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27905 index c87dc96..326055d 100644
27906 --- a/drivers/gpu/drm/drm_global.c
27907 +++ b/drivers/gpu/drm/drm_global.c
27908 @@ -36,7 +36,7 @@
27909 struct drm_global_item {
27910 struct mutex mutex;
27911 void *object;
27912 - int refcount;
27913 + atomic_t refcount;
27914 };
27915
27916 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27917 @@ -49,7 +49,7 @@ void drm_global_init(void)
27918 struct drm_global_item *item = &glob[i];
27919 mutex_init(&item->mutex);
27920 item->object = NULL;
27921 - item->refcount = 0;
27922 + atomic_set(&item->refcount, 0);
27923 }
27924 }
27925
27926 @@ -59,7 +59,7 @@ void drm_global_release(void)
27927 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27928 struct drm_global_item *item = &glob[i];
27929 BUG_ON(item->object != NULL);
27930 - BUG_ON(item->refcount != 0);
27931 + BUG_ON(atomic_read(&item->refcount) != 0);
27932 }
27933 }
27934
27935 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27936 void *object;
27937
27938 mutex_lock(&item->mutex);
27939 - if (item->refcount == 0) {
27940 + if (atomic_read(&item->refcount) == 0) {
27941 item->object = kzalloc(ref->size, GFP_KERNEL);
27942 if (unlikely(item->object == NULL)) {
27943 ret = -ENOMEM;
27944 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27945 goto out_err;
27946
27947 }
27948 - ++item->refcount;
27949 + atomic_inc(&item->refcount);
27950 ref->object = item->object;
27951 object = item->object;
27952 mutex_unlock(&item->mutex);
27953 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
27954 struct drm_global_item *item = &glob[ref->global_type];
27955
27956 mutex_lock(&item->mutex);
27957 - BUG_ON(item->refcount == 0);
27958 + BUG_ON(atomic_read(&item->refcount) == 0);
27959 BUG_ON(ref->object != item->object);
27960 - if (--item->refcount == 0) {
27961 + if (atomic_dec_and_test(&item->refcount)) {
27962 ref->release(ref);
27963 item->object = NULL;
27964 }
27965 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
27966 index ab1162d..42587b2 100644
27967 --- a/drivers/gpu/drm/drm_info.c
27968 +++ b/drivers/gpu/drm/drm_info.c
27969 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
27970 struct drm_local_map *map;
27971 struct drm_map_list *r_list;
27972
27973 - /* Hardcoded from _DRM_FRAME_BUFFER,
27974 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27975 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27976 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27977 + static const char * const types[] = {
27978 + [_DRM_FRAME_BUFFER] = "FB",
27979 + [_DRM_REGISTERS] = "REG",
27980 + [_DRM_SHM] = "SHM",
27981 + [_DRM_AGP] = "AGP",
27982 + [_DRM_SCATTER_GATHER] = "SG",
27983 + [_DRM_CONSISTENT] = "PCI",
27984 + [_DRM_GEM] = "GEM" };
27985 const char *type;
27986 int i;
27987
27988 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
27989 map = r_list->map;
27990 if (!map)
27991 continue;
27992 - if (map->type < 0 || map->type > 5)
27993 + if (map->type >= ARRAY_SIZE(types))
27994 type = "??";
27995 else
27996 type = types[map->type];
27997 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
27998 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27999 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28000 vma->vm_flags & VM_IO ? 'i' : '-',
28001 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28002 + 0);
28003 +#else
28004 vma->vm_pgoff);
28005 +#endif
28006
28007 #if defined(__i386__)
28008 pgprot = pgprot_val(vma->vm_page_prot);
28009 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28010 index ddd70db..40321e6 100644
28011 --- a/drivers/gpu/drm/drm_ioc32.c
28012 +++ b/drivers/gpu/drm/drm_ioc32.c
28013 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28014 request = compat_alloc_user_space(nbytes);
28015 if (!access_ok(VERIFY_WRITE, request, nbytes))
28016 return -EFAULT;
28017 - list = (struct drm_buf_desc *) (request + 1);
28018 + list = (struct drm_buf_desc __user *) (request + 1);
28019
28020 if (__put_user(count, &request->count)
28021 || __put_user(list, &request->list))
28022 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28023 request = compat_alloc_user_space(nbytes);
28024 if (!access_ok(VERIFY_WRITE, request, nbytes))
28025 return -EFAULT;
28026 - list = (struct drm_buf_pub *) (request + 1);
28027 + list = (struct drm_buf_pub __user *) (request + 1);
28028
28029 if (__put_user(count, &request->count)
28030 || __put_user(list, &request->list))
28031 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28032 index 904d7e9..ab88581 100644
28033 --- a/drivers/gpu/drm/drm_ioctl.c
28034 +++ b/drivers/gpu/drm/drm_ioctl.c
28035 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28036 stats->data[i].value =
28037 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28038 else
28039 - stats->data[i].value = atomic_read(&dev->counts[i]);
28040 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28041 stats->data[i].type = dev->types[i];
28042 }
28043
28044 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28045 index 632ae24..244cf4a 100644
28046 --- a/drivers/gpu/drm/drm_lock.c
28047 +++ b/drivers/gpu/drm/drm_lock.c
28048 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28049 if (drm_lock_take(&master->lock, lock->context)) {
28050 master->lock.file_priv = file_priv;
28051 master->lock.lock_time = jiffies;
28052 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28053 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28054 break; /* Got lock */
28055 }
28056
28057 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28058 return -EINVAL;
28059 }
28060
28061 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28062 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28063
28064 if (drm_lock_free(&master->lock, lock->context)) {
28065 /* FIXME: Should really bail out here. */
28066 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28067 index 8f371e8..9f85d52 100644
28068 --- a/drivers/gpu/drm/i810/i810_dma.c
28069 +++ b/drivers/gpu/drm/i810/i810_dma.c
28070 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28071 dma->buflist[vertex->idx],
28072 vertex->discard, vertex->used);
28073
28074 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28075 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28076 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28077 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28078 sarea_priv->last_enqueue = dev_priv->counter - 1;
28079 sarea_priv->last_dispatch = (int)hw_status[5];
28080
28081 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28082 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28083 mc->last_render);
28084
28085 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28086 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28087 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28088 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28089 sarea_priv->last_enqueue = dev_priv->counter - 1;
28090 sarea_priv->last_dispatch = (int)hw_status[5];
28091
28092 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28093 index c9339f4..f5e1b9d 100644
28094 --- a/drivers/gpu/drm/i810/i810_drv.h
28095 +++ b/drivers/gpu/drm/i810/i810_drv.h
28096 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28097 int page_flipping;
28098
28099 wait_queue_head_t irq_queue;
28100 - atomic_t irq_received;
28101 - atomic_t irq_emitted;
28102 + atomic_unchecked_t irq_received;
28103 + atomic_unchecked_t irq_emitted;
28104
28105 int front_offset;
28106 } drm_i810_private_t;
28107 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28108 index 004b048..7588eba 100644
28109 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28110 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28111 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28112 I915_READ(GTIMR));
28113 }
28114 seq_printf(m, "Interrupts received: %d\n",
28115 - atomic_read(&dev_priv->irq_received));
28116 + atomic_read_unchecked(&dev_priv->irq_received));
28117 for (i = 0; i < I915_NUM_RINGS; i++) {
28118 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28119 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28120 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28121 return ret;
28122
28123 if (opregion->header)
28124 - seq_write(m, opregion->header, OPREGION_SIZE);
28125 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28126
28127 mutex_unlock(&dev->struct_mutex);
28128
28129 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28130 index a9ae374..43c1e9e 100644
28131 --- a/drivers/gpu/drm/i915/i915_dma.c
28132 +++ b/drivers/gpu/drm/i915/i915_dma.c
28133 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28134 bool can_switch;
28135
28136 spin_lock(&dev->count_lock);
28137 - can_switch = (dev->open_count == 0);
28138 + can_switch = (local_read(&dev->open_count) == 0);
28139 spin_unlock(&dev->count_lock);
28140 return can_switch;
28141 }
28142 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28143 index 554bef7..d24791c 100644
28144 --- a/drivers/gpu/drm/i915/i915_drv.h
28145 +++ b/drivers/gpu/drm/i915/i915_drv.h
28146 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28147 /* render clock increase/decrease */
28148 /* display clock increase/decrease */
28149 /* pll clock increase/decrease */
28150 -};
28151 +} __no_const;
28152
28153 struct intel_device_info {
28154 u8 gen;
28155 @@ -312,7 +312,7 @@ typedef struct drm_i915_private {
28156 int current_page;
28157 int page_flipping;
28158
28159 - atomic_t irq_received;
28160 + atomic_unchecked_t irq_received;
28161
28162 /* protects the irq masks */
28163 spinlock_t irq_lock;
28164 @@ -887,7 +887,7 @@ struct drm_i915_gem_object {
28165 * will be page flipped away on the next vblank. When it
28166 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28167 */
28168 - atomic_t pending_flip;
28169 + atomic_unchecked_t pending_flip;
28170 };
28171
28172 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28173 @@ -1267,7 +1267,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28174 extern void intel_teardown_gmbus(struct drm_device *dev);
28175 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28176 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28177 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28178 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28179 {
28180 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28181 }
28182 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28183 index b9da890..cad1d98 100644
28184 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28185 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28186 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28187 i915_gem_clflush_object(obj);
28188
28189 if (obj->base.pending_write_domain)
28190 - cd->flips |= atomic_read(&obj->pending_flip);
28191 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28192
28193 /* The actual obj->write_domain will be updated with
28194 * pending_write_domain after we emit the accumulated flush for all
28195 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28196
28197 static int
28198 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28199 - int count)
28200 + unsigned int count)
28201 {
28202 - int i;
28203 + unsigned int i;
28204
28205 for (i = 0; i < count; i++) {
28206 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28207 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28208 index b40004b..7c53a75 100644
28209 --- a/drivers/gpu/drm/i915/i915_irq.c
28210 +++ b/drivers/gpu/drm/i915/i915_irq.c
28211 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28212 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28213 struct drm_i915_master_private *master_priv;
28214
28215 - atomic_inc(&dev_priv->irq_received);
28216 + atomic_inc_unchecked(&dev_priv->irq_received);
28217
28218 /* disable master interrupt before clearing iir */
28219 de_ier = I915_READ(DEIER);
28220 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28221 struct drm_i915_master_private *master_priv;
28222 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28223
28224 - atomic_inc(&dev_priv->irq_received);
28225 + atomic_inc_unchecked(&dev_priv->irq_received);
28226
28227 if (IS_GEN6(dev))
28228 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28229 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28230 int ret = IRQ_NONE, pipe;
28231 bool blc_event = false;
28232
28233 - atomic_inc(&dev_priv->irq_received);
28234 + atomic_inc_unchecked(&dev_priv->irq_received);
28235
28236 iir = I915_READ(IIR);
28237
28238 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28239 {
28240 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28241
28242 - atomic_set(&dev_priv->irq_received, 0);
28243 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28244
28245 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28246 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28247 @@ -1931,7 +1931,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28248 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28249 int pipe;
28250
28251 - atomic_set(&dev_priv->irq_received, 0);
28252 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28253
28254 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28255 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28256 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28257 index daa5743..c0757a9 100644
28258 --- a/drivers/gpu/drm/i915/intel_display.c
28259 +++ b/drivers/gpu/drm/i915/intel_display.c
28260 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28261
28262 wait_event(dev_priv->pending_flip_queue,
28263 atomic_read(&dev_priv->mm.wedged) ||
28264 - atomic_read(&obj->pending_flip) == 0);
28265 + atomic_read_unchecked(&obj->pending_flip) == 0);
28266
28267 /* Big Hammer, we also need to ensure that any pending
28268 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28269 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28270 obj = to_intel_framebuffer(crtc->fb)->obj;
28271 dev_priv = crtc->dev->dev_private;
28272 wait_event(dev_priv->pending_flip_queue,
28273 - atomic_read(&obj->pending_flip) == 0);
28274 + atomic_read_unchecked(&obj->pending_flip) == 0);
28275 }
28276
28277 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28278 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28279
28280 atomic_clear_mask(1 << intel_crtc->plane,
28281 &obj->pending_flip.counter);
28282 - if (atomic_read(&obj->pending_flip) == 0)
28283 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28284 wake_up(&dev_priv->pending_flip_queue);
28285
28286 schedule_work(&work->work);
28287 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28288 /* Block clients from rendering to the new back buffer until
28289 * the flip occurs and the object is no longer visible.
28290 */
28291 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28292 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28293
28294 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28295 if (ret)
28296 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28297 return 0;
28298
28299 cleanup_pending:
28300 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28301 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28302 drm_gem_object_unreference(&work->old_fb_obj->base);
28303 drm_gem_object_unreference(&obj->base);
28304 mutex_unlock(&dev->struct_mutex);
28305 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28306 index 54558a0..2d97005 100644
28307 --- a/drivers/gpu/drm/mga/mga_drv.h
28308 +++ b/drivers/gpu/drm/mga/mga_drv.h
28309 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28310 u32 clear_cmd;
28311 u32 maccess;
28312
28313 - atomic_t vbl_received; /**< Number of vblanks received. */
28314 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28315 wait_queue_head_t fence_queue;
28316 - atomic_t last_fence_retired;
28317 + atomic_unchecked_t last_fence_retired;
28318 u32 next_fence_to_post;
28319
28320 unsigned int fb_cpp;
28321 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28322 index 2581202..f230a8d9 100644
28323 --- a/drivers/gpu/drm/mga/mga_irq.c
28324 +++ b/drivers/gpu/drm/mga/mga_irq.c
28325 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28326 if (crtc != 0)
28327 return 0;
28328
28329 - return atomic_read(&dev_priv->vbl_received);
28330 + return atomic_read_unchecked(&dev_priv->vbl_received);
28331 }
28332
28333
28334 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28335 /* VBLANK interrupt */
28336 if (status & MGA_VLINEPEN) {
28337 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28338 - atomic_inc(&dev_priv->vbl_received);
28339 + atomic_inc_unchecked(&dev_priv->vbl_received);
28340 drm_handle_vblank(dev, 0);
28341 handled = 1;
28342 }
28343 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28344 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28345 MGA_WRITE(MGA_PRIMEND, prim_end);
28346
28347 - atomic_inc(&dev_priv->last_fence_retired);
28348 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28349 DRM_WAKEUP(&dev_priv->fence_queue);
28350 handled = 1;
28351 }
28352 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28353 * using fences.
28354 */
28355 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28356 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28357 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28358 - *sequence) <= (1 << 23)));
28359
28360 *sequence = cur_fence;
28361 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28362 index 5fc201b..7b032b9 100644
28363 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28364 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28365 @@ -201,7 +201,7 @@ struct methods {
28366 const char desc[8];
28367 void (*loadbios)(struct drm_device *, uint8_t *);
28368 const bool rw;
28369 -};
28370 +} __do_const;
28371
28372 static struct methods shadow_methods[] = {
28373 { "PRAMIN", load_vbios_pramin, true },
28374 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28375 struct bit_table {
28376 const char id;
28377 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28378 -};
28379 +} __no_const;
28380
28381 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28382
28383 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28384 index 4c0be3a..5757582 100644
28385 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28386 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28387 @@ -238,7 +238,7 @@ struct nouveau_channel {
28388 struct list_head pending;
28389 uint32_t sequence;
28390 uint32_t sequence_ack;
28391 - atomic_t last_sequence_irq;
28392 + atomic_unchecked_t last_sequence_irq;
28393 struct nouveau_vma vma;
28394 } fence;
28395
28396 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28397 u32 handle, u16 class);
28398 void (*set_tile_region)(struct drm_device *dev, int i);
28399 void (*tlb_flush)(struct drm_device *, int engine);
28400 -};
28401 +} __no_const;
28402
28403 struct nouveau_instmem_engine {
28404 void *priv;
28405 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28406 struct nouveau_mc_engine {
28407 int (*init)(struct drm_device *dev);
28408 void (*takedown)(struct drm_device *dev);
28409 -};
28410 +} __no_const;
28411
28412 struct nouveau_timer_engine {
28413 int (*init)(struct drm_device *dev);
28414 void (*takedown)(struct drm_device *dev);
28415 uint64_t (*read)(struct drm_device *dev);
28416 -};
28417 +} __no_const;
28418
28419 struct nouveau_fb_engine {
28420 int num_tiles;
28421 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28422 void (*put)(struct drm_device *, struct nouveau_mem **);
28423
28424 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28425 -};
28426 +} __no_const;
28427
28428 struct nouveau_engine {
28429 struct nouveau_instmem_engine instmem;
28430 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28431 struct drm_global_reference mem_global_ref;
28432 struct ttm_bo_global_ref bo_global_ref;
28433 struct ttm_bo_device bdev;
28434 - atomic_t validate_sequence;
28435 + atomic_unchecked_t validate_sequence;
28436 } ttm;
28437
28438 struct {
28439 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28440 index 2f6daae..c9d7b9e 100644
28441 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28442 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28443 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28444 if (USE_REFCNT(dev))
28445 sequence = nvchan_rd32(chan, 0x48);
28446 else
28447 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28448 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28449
28450 if (chan->fence.sequence_ack == sequence)
28451 goto out;
28452 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28453 return ret;
28454 }
28455
28456 - atomic_set(&chan->fence.last_sequence_irq, 0);
28457 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28458 return 0;
28459 }
28460
28461 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28462 index 5f0bc57..eb9fac8 100644
28463 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28464 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28465 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28466 int trycnt = 0;
28467 int ret, i;
28468
28469 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28470 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28471 retry:
28472 if (++trycnt > 100000) {
28473 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28474 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28475 index d8831ab..0ba8356 100644
28476 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28477 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28478 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28479 bool can_switch;
28480
28481 spin_lock(&dev->count_lock);
28482 - can_switch = (dev->open_count == 0);
28483 + can_switch = (local_read(&dev->open_count) == 0);
28484 spin_unlock(&dev->count_lock);
28485 return can_switch;
28486 }
28487 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28488 index dbdea8e..cd6eeeb 100644
28489 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28490 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28491 @@ -554,7 +554,7 @@ static int
28492 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28493 u32 class, u32 mthd, u32 data)
28494 {
28495 - atomic_set(&chan->fence.last_sequence_irq, data);
28496 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28497 return 0;
28498 }
28499
28500 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28501 index bcac90b..53bfc76 100644
28502 --- a/drivers/gpu/drm/r128/r128_cce.c
28503 +++ b/drivers/gpu/drm/r128/r128_cce.c
28504 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28505
28506 /* GH: Simple idle check.
28507 */
28508 - atomic_set(&dev_priv->idle_count, 0);
28509 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28510
28511 /* We don't support anything other than bus-mastering ring mode,
28512 * but the ring can be in either AGP or PCI space for the ring
28513 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28514 index 930c71b..499aded 100644
28515 --- a/drivers/gpu/drm/r128/r128_drv.h
28516 +++ b/drivers/gpu/drm/r128/r128_drv.h
28517 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28518 int is_pci;
28519 unsigned long cce_buffers_offset;
28520
28521 - atomic_t idle_count;
28522 + atomic_unchecked_t idle_count;
28523
28524 int page_flipping;
28525 int current_page;
28526 u32 crtc_offset;
28527 u32 crtc_offset_cntl;
28528
28529 - atomic_t vbl_received;
28530 + atomic_unchecked_t vbl_received;
28531
28532 u32 color_fmt;
28533 unsigned int front_offset;
28534 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28535 index 429d5a0..7e899ed 100644
28536 --- a/drivers/gpu/drm/r128/r128_irq.c
28537 +++ b/drivers/gpu/drm/r128/r128_irq.c
28538 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28539 if (crtc != 0)
28540 return 0;
28541
28542 - return atomic_read(&dev_priv->vbl_received);
28543 + return atomic_read_unchecked(&dev_priv->vbl_received);
28544 }
28545
28546 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28547 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28548 /* VBLANK interrupt */
28549 if (status & R128_CRTC_VBLANK_INT) {
28550 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28551 - atomic_inc(&dev_priv->vbl_received);
28552 + atomic_inc_unchecked(&dev_priv->vbl_received);
28553 drm_handle_vblank(dev, 0);
28554 return IRQ_HANDLED;
28555 }
28556 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28557 index a9e33ce..09edd4b 100644
28558 --- a/drivers/gpu/drm/r128/r128_state.c
28559 +++ b/drivers/gpu/drm/r128/r128_state.c
28560 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28561
28562 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28563 {
28564 - if (atomic_read(&dev_priv->idle_count) == 0)
28565 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28566 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28567 else
28568 - atomic_set(&dev_priv->idle_count, 0);
28569 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28570 }
28571
28572 #endif
28573 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28574 index 5a82b6b..9e69c73 100644
28575 --- a/drivers/gpu/drm/radeon/mkregtable.c
28576 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28577 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28578 regex_t mask_rex;
28579 regmatch_t match[4];
28580 char buf[1024];
28581 - size_t end;
28582 + long end;
28583 int len;
28584 int done = 0;
28585 int r;
28586 unsigned o;
28587 struct offset *offset;
28588 char last_reg_s[10];
28589 - int last_reg;
28590 + unsigned long last_reg;
28591
28592 if (regcomp
28593 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28594 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28595 index 8227e76..ce0b195 100644
28596 --- a/drivers/gpu/drm/radeon/radeon.h
28597 +++ b/drivers/gpu/drm/radeon/radeon.h
28598 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28599 */
28600 struct radeon_fence_driver {
28601 uint32_t scratch_reg;
28602 - atomic_t seq;
28603 + atomic_unchecked_t seq;
28604 uint32_t last_seq;
28605 unsigned long last_jiffies;
28606 unsigned long last_timeout;
28607 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28608 int x2, int y2);
28609 void (*draw_auto)(struct radeon_device *rdev);
28610 void (*set_default_state)(struct radeon_device *rdev);
28611 -};
28612 +} __no_const;
28613
28614 struct r600_blit {
28615 struct mutex mutex;
28616 @@ -954,7 +954,7 @@ struct radeon_asic {
28617 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28618 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28619 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28620 -};
28621 +} __no_const;
28622
28623 /*
28624 * Asic structures
28625 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28626 index 9b39145..389b93b 100644
28627 --- a/drivers/gpu/drm/radeon/radeon_device.c
28628 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28629 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28630 bool can_switch;
28631
28632 spin_lock(&dev->count_lock);
28633 - can_switch = (dev->open_count == 0);
28634 + can_switch = (local_read(&dev->open_count) == 0);
28635 spin_unlock(&dev->count_lock);
28636 return can_switch;
28637 }
28638 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28639 index a1b59ca..86f2d44 100644
28640 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28641 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28642 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28643
28644 /* SW interrupt */
28645 wait_queue_head_t swi_queue;
28646 - atomic_t swi_emitted;
28647 + atomic_unchecked_t swi_emitted;
28648 int vblank_crtc;
28649 uint32_t irq_enable_reg;
28650 uint32_t r500_disp_irq_reg;
28651 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28652 index 76ec0e9..6feb1a3 100644
28653 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28654 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28655 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28656 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28657 return 0;
28658 }
28659 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28660 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28661 if (!rdev->cp.ready)
28662 /* FIXME: cp is not running assume everythings is done right
28663 * away
28664 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28665 return r;
28666 }
28667 radeon_fence_write(rdev, 0);
28668 - atomic_set(&rdev->fence_drv.seq, 0);
28669 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28670 INIT_LIST_HEAD(&rdev->fence_drv.created);
28671 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28672 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28673 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28674 index 48b7cea..342236f 100644
28675 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28676 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28677 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28678 request = compat_alloc_user_space(sizeof(*request));
28679 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28680 || __put_user(req32.param, &request->param)
28681 - || __put_user((void __user *)(unsigned long)req32.value,
28682 + || __put_user((unsigned long)req32.value,
28683 &request->value))
28684 return -EFAULT;
28685
28686 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28687 index 00da384..32f972d 100644
28688 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28689 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28690 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28691 unsigned int ret;
28692 RING_LOCALS;
28693
28694 - atomic_inc(&dev_priv->swi_emitted);
28695 - ret = atomic_read(&dev_priv->swi_emitted);
28696 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28697 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28698
28699 BEGIN_RING(4);
28700 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28701 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28702 drm_radeon_private_t *dev_priv =
28703 (drm_radeon_private_t *) dev->dev_private;
28704
28705 - atomic_set(&dev_priv->swi_emitted, 0);
28706 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28707 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28708
28709 dev->max_vblank_count = 0x001fffff;
28710 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28711 index e8422ae..d22d4a8 100644
28712 --- a/drivers/gpu/drm/radeon/radeon_state.c
28713 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28714 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28715 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28716 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28717
28718 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28719 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28720 sarea_priv->nbox * sizeof(depth_boxes[0])))
28721 return -EFAULT;
28722
28723 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28724 {
28725 drm_radeon_private_t *dev_priv = dev->dev_private;
28726 drm_radeon_getparam_t *param = data;
28727 - int value;
28728 + int value = 0;
28729
28730 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28731
28732 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28733 index 0b5468b..9c4b308 100644
28734 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
28735 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28736 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28737 }
28738 if (unlikely(ttm_vm_ops == NULL)) {
28739 ttm_vm_ops = vma->vm_ops;
28740 - radeon_ttm_vm_ops = *ttm_vm_ops;
28741 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28742 + pax_open_kernel();
28743 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28744 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28745 + pax_close_kernel();
28746 }
28747 vma->vm_ops = &radeon_ttm_vm_ops;
28748 return 0;
28749 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28750 index a9049ed..501f284 100644
28751 --- a/drivers/gpu/drm/radeon/rs690.c
28752 +++ b/drivers/gpu/drm/radeon/rs690.c
28753 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28754 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28755 rdev->pm.sideport_bandwidth.full)
28756 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28757 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28758 + read_delay_latency.full = dfixed_const(800 * 1000);
28759 read_delay_latency.full = dfixed_div(read_delay_latency,
28760 rdev->pm.igp_sideport_mclk);
28761 + a.full = dfixed_const(370);
28762 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28763 } else {
28764 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28765 rdev->pm.k8_bandwidth.full)
28766 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28767 index 727e93d..1565650 100644
28768 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28769 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28770 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28771 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28772 struct shrink_control *sc)
28773 {
28774 - static atomic_t start_pool = ATOMIC_INIT(0);
28775 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28776 unsigned i;
28777 - unsigned pool_offset = atomic_add_return(1, &start_pool);
28778 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28779 struct ttm_page_pool *pool;
28780 int shrink_pages = sc->nr_to_scan;
28781
28782 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28783 index 9cf87d9..2000b7d 100644
28784 --- a/drivers/gpu/drm/via/via_drv.h
28785 +++ b/drivers/gpu/drm/via/via_drv.h
28786 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28787 typedef uint32_t maskarray_t[5];
28788
28789 typedef struct drm_via_irq {
28790 - atomic_t irq_received;
28791 + atomic_unchecked_t irq_received;
28792 uint32_t pending_mask;
28793 uint32_t enable_mask;
28794 wait_queue_head_t irq_queue;
28795 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28796 struct timeval last_vblank;
28797 int last_vblank_valid;
28798 unsigned usec_per_vblank;
28799 - atomic_t vbl_received;
28800 + atomic_unchecked_t vbl_received;
28801 drm_via_state_t hc_state;
28802 char pci_buf[VIA_PCI_BUF_SIZE];
28803 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28804 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28805 index d391f48..10c8ca3 100644
28806 --- a/drivers/gpu/drm/via/via_irq.c
28807 +++ b/drivers/gpu/drm/via/via_irq.c
28808 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28809 if (crtc != 0)
28810 return 0;
28811
28812 - return atomic_read(&dev_priv->vbl_received);
28813 + return atomic_read_unchecked(&dev_priv->vbl_received);
28814 }
28815
28816 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28817 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28818
28819 status = VIA_READ(VIA_REG_INTERRUPT);
28820 if (status & VIA_IRQ_VBLANK_PENDING) {
28821 - atomic_inc(&dev_priv->vbl_received);
28822 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28823 + atomic_inc_unchecked(&dev_priv->vbl_received);
28824 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28825 do_gettimeofday(&cur_vblank);
28826 if (dev_priv->last_vblank_valid) {
28827 dev_priv->usec_per_vblank =
28828 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28829 dev_priv->last_vblank = cur_vblank;
28830 dev_priv->last_vblank_valid = 1;
28831 }
28832 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28833 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28834 DRM_DEBUG("US per vblank is: %u\n",
28835 dev_priv->usec_per_vblank);
28836 }
28837 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28838
28839 for (i = 0; i < dev_priv->num_irqs; ++i) {
28840 if (status & cur_irq->pending_mask) {
28841 - atomic_inc(&cur_irq->irq_received);
28842 + atomic_inc_unchecked(&cur_irq->irq_received);
28843 DRM_WAKEUP(&cur_irq->irq_queue);
28844 handled = 1;
28845 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28846 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28847 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28848 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28849 masks[irq][4]));
28850 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28851 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28852 } else {
28853 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28854 (((cur_irq_sequence =
28855 - atomic_read(&cur_irq->irq_received)) -
28856 + atomic_read_unchecked(&cur_irq->irq_received)) -
28857 *sequence) <= (1 << 23)));
28858 }
28859 *sequence = cur_irq_sequence;
28860 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28861 }
28862
28863 for (i = 0; i < dev_priv->num_irqs; ++i) {
28864 - atomic_set(&cur_irq->irq_received, 0);
28865 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28866 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28867 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28868 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28869 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28870 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28871 case VIA_IRQ_RELATIVE:
28872 irqwait->request.sequence +=
28873 - atomic_read(&cur_irq->irq_received);
28874 + atomic_read_unchecked(&cur_irq->irq_received);
28875 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28876 case VIA_IRQ_ABSOLUTE:
28877 break;
28878 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28879 index dc27970..f18b008 100644
28880 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28881 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28882 @@ -260,7 +260,7 @@ struct vmw_private {
28883 * Fencing and IRQs.
28884 */
28885
28886 - atomic_t marker_seq;
28887 + atomic_unchecked_t marker_seq;
28888 wait_queue_head_t fence_queue;
28889 wait_queue_head_t fifo_queue;
28890 int fence_queue_waiters; /* Protected by hw_mutex */
28891 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28892 index a0c2f12..68ae6cb 100644
28893 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28894 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28895 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28896 (unsigned int) min,
28897 (unsigned int) fifo->capabilities);
28898
28899 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28900 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28901 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28902 vmw_marker_queue_init(&fifo->marker_queue);
28903 return vmw_fifo_send_fence(dev_priv, &dummy);
28904 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28905 if (reserveable)
28906 iowrite32(bytes, fifo_mem +
28907 SVGA_FIFO_RESERVED);
28908 - return fifo_mem + (next_cmd >> 2);
28909 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28910 } else {
28911 need_bounce = true;
28912 }
28913 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28914
28915 fm = vmw_fifo_reserve(dev_priv, bytes);
28916 if (unlikely(fm == NULL)) {
28917 - *seqno = atomic_read(&dev_priv->marker_seq);
28918 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28919 ret = -ENOMEM;
28920 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
28921 false, 3*HZ);
28922 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28923 }
28924
28925 do {
28926 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
28927 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
28928 } while (*seqno == 0);
28929
28930 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28931 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28932 index cabc95f..14b3d77 100644
28933 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28934 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28935 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
28936 * emitted. Then the fence is stale and signaled.
28937 */
28938
28939 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
28940 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
28941 > VMW_FENCE_WRAP);
28942
28943 return ret;
28944 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
28945
28946 if (fifo_idle)
28947 down_read(&fifo_state->rwsem);
28948 - signal_seq = atomic_read(&dev_priv->marker_seq);
28949 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
28950 ret = 0;
28951
28952 for (;;) {
28953 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28954 index 8a8725c..afed796 100644
28955 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28956 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28957 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28958 while (!vmw_lag_lt(queue, us)) {
28959 spin_lock(&queue->lock);
28960 if (list_empty(&queue->head))
28961 - seqno = atomic_read(&dev_priv->marker_seq);
28962 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28963 else {
28964 marker = list_first_entry(&queue->head,
28965 struct vmw_marker, head);
28966 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
28967 index bb656d8..4169fca 100644
28968 --- a/drivers/hid/hid-core.c
28969 +++ b/drivers/hid/hid-core.c
28970 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
28971
28972 int hid_add_device(struct hid_device *hdev)
28973 {
28974 - static atomic_t id = ATOMIC_INIT(0);
28975 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28976 int ret;
28977
28978 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28979 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
28980 /* XXX hack, any other cleaner solution after the driver core
28981 * is converted to allow more than 20 bytes as the device name? */
28982 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28983 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28984 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28985
28986 hid_debug_register(hdev, dev_name(&hdev->dev));
28987 ret = device_add(&hdev->dev);
28988 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
28989 index 4ef02b2..8a96831 100644
28990 --- a/drivers/hid/usbhid/hiddev.c
28991 +++ b/drivers/hid/usbhid/hiddev.c
28992 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
28993 break;
28994
28995 case HIDIOCAPPLICATION:
28996 - if (arg < 0 || arg >= hid->maxapplication)
28997 + if (arg >= hid->maxapplication)
28998 break;
28999
29000 for (i = 0; i < hid->maxcollection; i++)
29001 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29002 index 4065374..10ed7dc 100644
29003 --- a/drivers/hv/channel.c
29004 +++ b/drivers/hv/channel.c
29005 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29006 int ret = 0;
29007 int t;
29008
29009 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29010 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29011 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29012 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29013
29014 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29015 if (ret)
29016 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29017 index 0fb100e..baf87e5 100644
29018 --- a/drivers/hv/hv.c
29019 +++ b/drivers/hv/hv.c
29020 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29021 u64 output_address = (output) ? virt_to_phys(output) : 0;
29022 u32 output_address_hi = output_address >> 32;
29023 u32 output_address_lo = output_address & 0xFFFFFFFF;
29024 - void *hypercall_page = hv_context.hypercall_page;
29025 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29026
29027 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29028 "=a"(hv_status_lo) : "d" (control_hi),
29029 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29030 index 0aee112..b72d21f 100644
29031 --- a/drivers/hv/hyperv_vmbus.h
29032 +++ b/drivers/hv/hyperv_vmbus.h
29033 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29034 struct vmbus_connection {
29035 enum vmbus_connect_state conn_state;
29036
29037 - atomic_t next_gpadl_handle;
29038 + atomic_unchecked_t next_gpadl_handle;
29039
29040 /*
29041 * Represents channel interrupts. Each bit position represents a
29042 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29043 index d2d0a2a..90b8f4d 100644
29044 --- a/drivers/hv/vmbus_drv.c
29045 +++ b/drivers/hv/vmbus_drv.c
29046 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29047 {
29048 int ret = 0;
29049
29050 - static atomic_t device_num = ATOMIC_INIT(0);
29051 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29052
29053 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29054 - atomic_inc_return(&device_num));
29055 + atomic_inc_return_unchecked(&device_num));
29056
29057 child_device_obj->device.bus = &hv_bus;
29058 child_device_obj->device.parent = &hv_acpi_dev->dev;
29059 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29060 index 66f6729..2d6de0a 100644
29061 --- a/drivers/hwmon/acpi_power_meter.c
29062 +++ b/drivers/hwmon/acpi_power_meter.c
29063 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29064 return res;
29065
29066 temp /= 1000;
29067 - if (temp < 0)
29068 - return -EINVAL;
29069
29070 mutex_lock(&resource->lock);
29071 resource->trip[attr->index - 7] = temp;
29072 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29073 index 5357925..6cf0418 100644
29074 --- a/drivers/hwmon/sht15.c
29075 +++ b/drivers/hwmon/sht15.c
29076 @@ -166,7 +166,7 @@ struct sht15_data {
29077 int supply_uV;
29078 bool supply_uV_valid;
29079 struct work_struct update_supply_work;
29080 - atomic_t interrupt_handled;
29081 + atomic_unchecked_t interrupt_handled;
29082 };
29083
29084 /**
29085 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29086 return ret;
29087
29088 gpio_direction_input(data->pdata->gpio_data);
29089 - atomic_set(&data->interrupt_handled, 0);
29090 + atomic_set_unchecked(&data->interrupt_handled, 0);
29091
29092 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29093 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29094 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29095 /* Only relevant if the interrupt hasn't occurred. */
29096 - if (!atomic_read(&data->interrupt_handled))
29097 + if (!atomic_read_unchecked(&data->interrupt_handled))
29098 schedule_work(&data->read_work);
29099 }
29100 ret = wait_event_timeout(data->wait_queue,
29101 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29102
29103 /* First disable the interrupt */
29104 disable_irq_nosync(irq);
29105 - atomic_inc(&data->interrupt_handled);
29106 + atomic_inc_unchecked(&data->interrupt_handled);
29107 /* Then schedule a reading work struct */
29108 if (data->state != SHT15_READING_NOTHING)
29109 schedule_work(&data->read_work);
29110 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29111 * If not, then start the interrupt again - care here as could
29112 * have gone low in meantime so verify it hasn't!
29113 */
29114 - atomic_set(&data->interrupt_handled, 0);
29115 + atomic_set_unchecked(&data->interrupt_handled, 0);
29116 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29117 /* If still not occurred or another handler has been scheduled */
29118 if (gpio_get_value(data->pdata->gpio_data)
29119 - || atomic_read(&data->interrupt_handled))
29120 + || atomic_read_unchecked(&data->interrupt_handled))
29121 return;
29122 }
29123
29124 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29125 index 378fcb5..5e91fa8 100644
29126 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29127 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29128 @@ -43,7 +43,7 @@
29129 extern struct i2c_adapter amd756_smbus;
29130
29131 static struct i2c_adapter *s4882_adapter;
29132 -static struct i2c_algorithm *s4882_algo;
29133 +static i2c_algorithm_no_const *s4882_algo;
29134
29135 /* Wrapper access functions for multiplexed SMBus */
29136 static DEFINE_MUTEX(amd756_lock);
29137 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29138 index 29015eb..af2d8e9 100644
29139 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29140 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29141 @@ -41,7 +41,7 @@
29142 extern struct i2c_adapter *nforce2_smbus;
29143
29144 static struct i2c_adapter *s4985_adapter;
29145 -static struct i2c_algorithm *s4985_algo;
29146 +static i2c_algorithm_no_const *s4985_algo;
29147
29148 /* Wrapper access functions for multiplexed SMBus */
29149 static DEFINE_MUTEX(nforce2_lock);
29150 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29151 index d7a4833..7fae376 100644
29152 --- a/drivers/i2c/i2c-mux.c
29153 +++ b/drivers/i2c/i2c-mux.c
29154 @@ -28,7 +28,7 @@
29155 /* multiplexer per channel data */
29156 struct i2c_mux_priv {
29157 struct i2c_adapter adap;
29158 - struct i2c_algorithm algo;
29159 + i2c_algorithm_no_const algo;
29160
29161 struct i2c_adapter *parent;
29162 void *mux_dev; /* the mux chip/device */
29163 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29164 index 57d00ca..0145194 100644
29165 --- a/drivers/ide/aec62xx.c
29166 +++ b/drivers/ide/aec62xx.c
29167 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29168 .cable_detect = atp86x_cable_detect,
29169 };
29170
29171 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29172 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29173 { /* 0: AEC6210 */
29174 .name = DRV_NAME,
29175 .init_chipset = init_chipset_aec62xx,
29176 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29177 index 2c8016a..911a27c 100644
29178 --- a/drivers/ide/alim15x3.c
29179 +++ b/drivers/ide/alim15x3.c
29180 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29181 .dma_sff_read_status = ide_dma_sff_read_status,
29182 };
29183
29184 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29185 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29186 .name = DRV_NAME,
29187 .init_chipset = init_chipset_ali15x3,
29188 .init_hwif = init_hwif_ali15x3,
29189 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29190 index 3747b25..56fc995 100644
29191 --- a/drivers/ide/amd74xx.c
29192 +++ b/drivers/ide/amd74xx.c
29193 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29194 .udma_mask = udma, \
29195 }
29196
29197 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29198 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29199 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29200 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29201 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29202 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29203 index 15f0ead..cb43480 100644
29204 --- a/drivers/ide/atiixp.c
29205 +++ b/drivers/ide/atiixp.c
29206 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29207 .cable_detect = atiixp_cable_detect,
29208 };
29209
29210 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29211 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29212 { /* 0: IXP200/300/400/700 */
29213 .name = DRV_NAME,
29214 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29215 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29216 index 5f80312..d1fc438 100644
29217 --- a/drivers/ide/cmd64x.c
29218 +++ b/drivers/ide/cmd64x.c
29219 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29220 .dma_sff_read_status = ide_dma_sff_read_status,
29221 };
29222
29223 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29224 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29225 { /* 0: CMD643 */
29226 .name = DRV_NAME,
29227 .init_chipset = init_chipset_cmd64x,
29228 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29229 index 2c1e5f7..1444762 100644
29230 --- a/drivers/ide/cs5520.c
29231 +++ b/drivers/ide/cs5520.c
29232 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29233 .set_dma_mode = cs5520_set_dma_mode,
29234 };
29235
29236 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29237 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29238 .name = DRV_NAME,
29239 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29240 .port_ops = &cs5520_port_ops,
29241 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29242 index 4dc4eb9..49b40ad 100644
29243 --- a/drivers/ide/cs5530.c
29244 +++ b/drivers/ide/cs5530.c
29245 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29246 .udma_filter = cs5530_udma_filter,
29247 };
29248
29249 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29250 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29251 .name = DRV_NAME,
29252 .init_chipset = init_chipset_cs5530,
29253 .init_hwif = init_hwif_cs5530,
29254 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29255 index 5059faf..18d4c85 100644
29256 --- a/drivers/ide/cs5535.c
29257 +++ b/drivers/ide/cs5535.c
29258 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29259 .cable_detect = cs5535_cable_detect,
29260 };
29261
29262 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29263 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29264 .name = DRV_NAME,
29265 .port_ops = &cs5535_port_ops,
29266 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29267 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29268 index 847553f..3ffb49d 100644
29269 --- a/drivers/ide/cy82c693.c
29270 +++ b/drivers/ide/cy82c693.c
29271 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29272 .set_dma_mode = cy82c693_set_dma_mode,
29273 };
29274
29275 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29276 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29277 .name = DRV_NAME,
29278 .init_iops = init_iops_cy82c693,
29279 .port_ops = &cy82c693_port_ops,
29280 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29281 index 58c51cd..4aec3b8 100644
29282 --- a/drivers/ide/hpt366.c
29283 +++ b/drivers/ide/hpt366.c
29284 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29285 }
29286 };
29287
29288 -static const struct hpt_info hpt36x __devinitdata = {
29289 +static const struct hpt_info hpt36x __devinitconst = {
29290 .chip_name = "HPT36x",
29291 .chip_type = HPT36x,
29292 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29293 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29294 .timings = &hpt36x_timings
29295 };
29296
29297 -static const struct hpt_info hpt370 __devinitdata = {
29298 +static const struct hpt_info hpt370 __devinitconst = {
29299 .chip_name = "HPT370",
29300 .chip_type = HPT370,
29301 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29302 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29303 .timings = &hpt37x_timings
29304 };
29305
29306 -static const struct hpt_info hpt370a __devinitdata = {
29307 +static const struct hpt_info hpt370a __devinitconst = {
29308 .chip_name = "HPT370A",
29309 .chip_type = HPT370A,
29310 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29311 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29312 .timings = &hpt37x_timings
29313 };
29314
29315 -static const struct hpt_info hpt374 __devinitdata = {
29316 +static const struct hpt_info hpt374 __devinitconst = {
29317 .chip_name = "HPT374",
29318 .chip_type = HPT374,
29319 .udma_mask = ATA_UDMA5,
29320 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29321 .timings = &hpt37x_timings
29322 };
29323
29324 -static const struct hpt_info hpt372 __devinitdata = {
29325 +static const struct hpt_info hpt372 __devinitconst = {
29326 .chip_name = "HPT372",
29327 .chip_type = HPT372,
29328 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29329 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29330 .timings = &hpt37x_timings
29331 };
29332
29333 -static const struct hpt_info hpt372a __devinitdata = {
29334 +static const struct hpt_info hpt372a __devinitconst = {
29335 .chip_name = "HPT372A",
29336 .chip_type = HPT372A,
29337 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29338 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29339 .timings = &hpt37x_timings
29340 };
29341
29342 -static const struct hpt_info hpt302 __devinitdata = {
29343 +static const struct hpt_info hpt302 __devinitconst = {
29344 .chip_name = "HPT302",
29345 .chip_type = HPT302,
29346 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29347 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29348 .timings = &hpt37x_timings
29349 };
29350
29351 -static const struct hpt_info hpt371 __devinitdata = {
29352 +static const struct hpt_info hpt371 __devinitconst = {
29353 .chip_name = "HPT371",
29354 .chip_type = HPT371,
29355 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29356 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29357 .timings = &hpt37x_timings
29358 };
29359
29360 -static const struct hpt_info hpt372n __devinitdata = {
29361 +static const struct hpt_info hpt372n __devinitconst = {
29362 .chip_name = "HPT372N",
29363 .chip_type = HPT372N,
29364 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29365 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29366 .timings = &hpt37x_timings
29367 };
29368
29369 -static const struct hpt_info hpt302n __devinitdata = {
29370 +static const struct hpt_info hpt302n __devinitconst = {
29371 .chip_name = "HPT302N",
29372 .chip_type = HPT302N,
29373 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29374 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29375 .timings = &hpt37x_timings
29376 };
29377
29378 -static const struct hpt_info hpt371n __devinitdata = {
29379 +static const struct hpt_info hpt371n __devinitconst = {
29380 .chip_name = "HPT371N",
29381 .chip_type = HPT371N,
29382 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29383 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29384 .dma_sff_read_status = ide_dma_sff_read_status,
29385 };
29386
29387 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29388 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29389 { /* 0: HPT36x */
29390 .name = DRV_NAME,
29391 .init_chipset = init_chipset_hpt366,
29392 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29393 index 8126824..55a2798 100644
29394 --- a/drivers/ide/ide-cd.c
29395 +++ b/drivers/ide/ide-cd.c
29396 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29397 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29398 if ((unsigned long)buf & alignment
29399 || blk_rq_bytes(rq) & q->dma_pad_mask
29400 - || object_is_on_stack(buf))
29401 + || object_starts_on_stack(buf))
29402 drive->dma = 0;
29403 }
29404 }
29405 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29406 index a743e68..1cfd674 100644
29407 --- a/drivers/ide/ide-pci-generic.c
29408 +++ b/drivers/ide/ide-pci-generic.c
29409 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29410 .udma_mask = ATA_UDMA6, \
29411 }
29412
29413 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29414 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29415 /* 0: Unknown */
29416 DECLARE_GENERIC_PCI_DEV(0),
29417
29418 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29419 index 560e66d..d5dd180 100644
29420 --- a/drivers/ide/it8172.c
29421 +++ b/drivers/ide/it8172.c
29422 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29423 .set_dma_mode = it8172_set_dma_mode,
29424 };
29425
29426 -static const struct ide_port_info it8172_port_info __devinitdata = {
29427 +static const struct ide_port_info it8172_port_info __devinitconst = {
29428 .name = DRV_NAME,
29429 .port_ops = &it8172_port_ops,
29430 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29431 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29432 index 46816ba..1847aeb 100644
29433 --- a/drivers/ide/it8213.c
29434 +++ b/drivers/ide/it8213.c
29435 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29436 .cable_detect = it8213_cable_detect,
29437 };
29438
29439 -static const struct ide_port_info it8213_chipset __devinitdata = {
29440 +static const struct ide_port_info it8213_chipset __devinitconst = {
29441 .name = DRV_NAME,
29442 .enablebits = { {0x41, 0x80, 0x80} },
29443 .port_ops = &it8213_port_ops,
29444 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29445 index 2e3169f..c5611db 100644
29446 --- a/drivers/ide/it821x.c
29447 +++ b/drivers/ide/it821x.c
29448 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29449 .cable_detect = it821x_cable_detect,
29450 };
29451
29452 -static const struct ide_port_info it821x_chipset __devinitdata = {
29453 +static const struct ide_port_info it821x_chipset __devinitconst = {
29454 .name = DRV_NAME,
29455 .init_chipset = init_chipset_it821x,
29456 .init_hwif = init_hwif_it821x,
29457 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29458 index 74c2c4a..efddd7d 100644
29459 --- a/drivers/ide/jmicron.c
29460 +++ b/drivers/ide/jmicron.c
29461 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29462 .cable_detect = jmicron_cable_detect,
29463 };
29464
29465 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29466 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29467 .name = DRV_NAME,
29468 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29469 .port_ops = &jmicron_port_ops,
29470 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29471 index 95327a2..73f78d8 100644
29472 --- a/drivers/ide/ns87415.c
29473 +++ b/drivers/ide/ns87415.c
29474 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29475 .dma_sff_read_status = superio_dma_sff_read_status,
29476 };
29477
29478 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29479 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29480 .name = DRV_NAME,
29481 .init_hwif = init_hwif_ns87415,
29482 .tp_ops = &ns87415_tp_ops,
29483 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29484 index 1a53a4c..39edc66 100644
29485 --- a/drivers/ide/opti621.c
29486 +++ b/drivers/ide/opti621.c
29487 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29488 .set_pio_mode = opti621_set_pio_mode,
29489 };
29490
29491 -static const struct ide_port_info opti621_chipset __devinitdata = {
29492 +static const struct ide_port_info opti621_chipset __devinitconst = {
29493 .name = DRV_NAME,
29494 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29495 .port_ops = &opti621_port_ops,
29496 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29497 index 9546fe2..2e5ceb6 100644
29498 --- a/drivers/ide/pdc202xx_new.c
29499 +++ b/drivers/ide/pdc202xx_new.c
29500 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29501 .udma_mask = udma, \
29502 }
29503
29504 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29505 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29506 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29507 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29508 };
29509 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29510 index 3a35ec6..5634510 100644
29511 --- a/drivers/ide/pdc202xx_old.c
29512 +++ b/drivers/ide/pdc202xx_old.c
29513 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29514 .max_sectors = sectors, \
29515 }
29516
29517 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29518 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29519 { /* 0: PDC20246 */
29520 .name = DRV_NAME,
29521 .init_chipset = init_chipset_pdc202xx,
29522 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29523 index 1892e81..fe0fd60 100644
29524 --- a/drivers/ide/piix.c
29525 +++ b/drivers/ide/piix.c
29526 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29527 .udma_mask = udma, \
29528 }
29529
29530 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29531 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29532 /* 0: MPIIX */
29533 { /*
29534 * MPIIX actually has only a single IDE channel mapped to
29535 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29536 index a6414a8..c04173e 100644
29537 --- a/drivers/ide/rz1000.c
29538 +++ b/drivers/ide/rz1000.c
29539 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29540 }
29541 }
29542
29543 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29544 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29545 .name = DRV_NAME,
29546 .host_flags = IDE_HFLAG_NO_DMA,
29547 };
29548 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29549 index 356b9b5..d4758eb 100644
29550 --- a/drivers/ide/sc1200.c
29551 +++ b/drivers/ide/sc1200.c
29552 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29553 .dma_sff_read_status = ide_dma_sff_read_status,
29554 };
29555
29556 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29557 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29558 .name = DRV_NAME,
29559 .port_ops = &sc1200_port_ops,
29560 .dma_ops = &sc1200_dma_ops,
29561 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29562 index b7f5b0c..9701038 100644
29563 --- a/drivers/ide/scc_pata.c
29564 +++ b/drivers/ide/scc_pata.c
29565 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29566 .dma_sff_read_status = scc_dma_sff_read_status,
29567 };
29568
29569 -static const struct ide_port_info scc_chipset __devinitdata = {
29570 +static const struct ide_port_info scc_chipset __devinitconst = {
29571 .name = "sccIDE",
29572 .init_iops = init_iops_scc,
29573 .init_dma = scc_init_dma,
29574 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29575 index 35fb8da..24d72ef 100644
29576 --- a/drivers/ide/serverworks.c
29577 +++ b/drivers/ide/serverworks.c
29578 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29579 .cable_detect = svwks_cable_detect,
29580 };
29581
29582 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29583 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29584 { /* 0: OSB4 */
29585 .name = DRV_NAME,
29586 .init_chipset = init_chipset_svwks,
29587 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29588 index ddeda44..46f7e30 100644
29589 --- a/drivers/ide/siimage.c
29590 +++ b/drivers/ide/siimage.c
29591 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29592 .udma_mask = ATA_UDMA6, \
29593 }
29594
29595 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29596 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29597 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29598 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29599 };
29600 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29601 index 4a00225..09e61b4 100644
29602 --- a/drivers/ide/sis5513.c
29603 +++ b/drivers/ide/sis5513.c
29604 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29605 .cable_detect = sis_cable_detect,
29606 };
29607
29608 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29609 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29610 .name = DRV_NAME,
29611 .init_chipset = init_chipset_sis5513,
29612 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29613 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29614 index f21dc2a..d051cd2 100644
29615 --- a/drivers/ide/sl82c105.c
29616 +++ b/drivers/ide/sl82c105.c
29617 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29618 .dma_sff_read_status = ide_dma_sff_read_status,
29619 };
29620
29621 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29622 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29623 .name = DRV_NAME,
29624 .init_chipset = init_chipset_sl82c105,
29625 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29626 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29627 index 864ffe0..863a5e9 100644
29628 --- a/drivers/ide/slc90e66.c
29629 +++ b/drivers/ide/slc90e66.c
29630 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29631 .cable_detect = slc90e66_cable_detect,
29632 };
29633
29634 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29635 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29636 .name = DRV_NAME,
29637 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29638 .port_ops = &slc90e66_port_ops,
29639 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29640 index 4799d5c..1794678 100644
29641 --- a/drivers/ide/tc86c001.c
29642 +++ b/drivers/ide/tc86c001.c
29643 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29644 .dma_sff_read_status = ide_dma_sff_read_status,
29645 };
29646
29647 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29648 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29649 .name = DRV_NAME,
29650 .init_hwif = init_hwif_tc86c001,
29651 .port_ops = &tc86c001_port_ops,
29652 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29653 index 281c914..55ce1b8 100644
29654 --- a/drivers/ide/triflex.c
29655 +++ b/drivers/ide/triflex.c
29656 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29657 .set_dma_mode = triflex_set_mode,
29658 };
29659
29660 -static const struct ide_port_info triflex_device __devinitdata = {
29661 +static const struct ide_port_info triflex_device __devinitconst = {
29662 .name = DRV_NAME,
29663 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29664 .port_ops = &triflex_port_ops,
29665 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29666 index 4b42ca0..e494a98 100644
29667 --- a/drivers/ide/trm290.c
29668 +++ b/drivers/ide/trm290.c
29669 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29670 .dma_check = trm290_dma_check,
29671 };
29672
29673 -static const struct ide_port_info trm290_chipset __devinitdata = {
29674 +static const struct ide_port_info trm290_chipset __devinitconst = {
29675 .name = DRV_NAME,
29676 .init_hwif = init_hwif_trm290,
29677 .tp_ops = &trm290_tp_ops,
29678 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29679 index f46f49c..eb77678 100644
29680 --- a/drivers/ide/via82cxxx.c
29681 +++ b/drivers/ide/via82cxxx.c
29682 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29683 .cable_detect = via82cxxx_cable_detect,
29684 };
29685
29686 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29687 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29688 .name = DRV_NAME,
29689 .init_chipset = init_chipset_via82cxxx,
29690 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29691 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29692 index eb0e2cc..14241c7 100644
29693 --- a/drivers/ieee802154/fakehard.c
29694 +++ b/drivers/ieee802154/fakehard.c
29695 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29696 phy->transmit_power = 0xbf;
29697
29698 dev->netdev_ops = &fake_ops;
29699 - dev->ml_priv = &fake_mlme;
29700 + dev->ml_priv = (void *)&fake_mlme;
29701
29702 priv = netdev_priv(dev);
29703 priv->phy = phy;
29704 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29705 index 8b72f39..55df4c8 100644
29706 --- a/drivers/infiniband/core/cm.c
29707 +++ b/drivers/infiniband/core/cm.c
29708 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29709
29710 struct cm_counter_group {
29711 struct kobject obj;
29712 - atomic_long_t counter[CM_ATTR_COUNT];
29713 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29714 };
29715
29716 struct cm_counter_attribute {
29717 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29718 struct ib_mad_send_buf *msg = NULL;
29719 int ret;
29720
29721 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29722 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29723 counter[CM_REQ_COUNTER]);
29724
29725 /* Quick state check to discard duplicate REQs. */
29726 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29727 if (!cm_id_priv)
29728 return;
29729
29730 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29731 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29732 counter[CM_REP_COUNTER]);
29733 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29734 if (ret)
29735 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29736 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29737 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29738 spin_unlock_irq(&cm_id_priv->lock);
29739 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29740 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29741 counter[CM_RTU_COUNTER]);
29742 goto out;
29743 }
29744 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29745 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29746 dreq_msg->local_comm_id);
29747 if (!cm_id_priv) {
29748 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29749 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29750 counter[CM_DREQ_COUNTER]);
29751 cm_issue_drep(work->port, work->mad_recv_wc);
29752 return -EINVAL;
29753 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29754 case IB_CM_MRA_REP_RCVD:
29755 break;
29756 case IB_CM_TIMEWAIT:
29757 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29758 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29759 counter[CM_DREQ_COUNTER]);
29760 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29761 goto unlock;
29762 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29763 cm_free_msg(msg);
29764 goto deref;
29765 case IB_CM_DREQ_RCVD:
29766 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29767 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29768 counter[CM_DREQ_COUNTER]);
29769 goto unlock;
29770 default:
29771 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29772 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29773 cm_id_priv->msg, timeout)) {
29774 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29775 - atomic_long_inc(&work->port->
29776 + atomic_long_inc_unchecked(&work->port->
29777 counter_group[CM_RECV_DUPLICATES].
29778 counter[CM_MRA_COUNTER]);
29779 goto out;
29780 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29781 break;
29782 case IB_CM_MRA_REQ_RCVD:
29783 case IB_CM_MRA_REP_RCVD:
29784 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29785 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29786 counter[CM_MRA_COUNTER]);
29787 /* fall through */
29788 default:
29789 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29790 case IB_CM_LAP_IDLE:
29791 break;
29792 case IB_CM_MRA_LAP_SENT:
29793 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29794 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29795 counter[CM_LAP_COUNTER]);
29796 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29797 goto unlock;
29798 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29799 cm_free_msg(msg);
29800 goto deref;
29801 case IB_CM_LAP_RCVD:
29802 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29803 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29804 counter[CM_LAP_COUNTER]);
29805 goto unlock;
29806 default:
29807 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29808 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29809 if (cur_cm_id_priv) {
29810 spin_unlock_irq(&cm.lock);
29811 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29812 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29813 counter[CM_SIDR_REQ_COUNTER]);
29814 goto out; /* Duplicate message. */
29815 }
29816 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29817 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29818 msg->retries = 1;
29819
29820 - atomic_long_add(1 + msg->retries,
29821 + atomic_long_add_unchecked(1 + msg->retries,
29822 &port->counter_group[CM_XMIT].counter[attr_index]);
29823 if (msg->retries)
29824 - atomic_long_add(msg->retries,
29825 + atomic_long_add_unchecked(msg->retries,
29826 &port->counter_group[CM_XMIT_RETRIES].
29827 counter[attr_index]);
29828
29829 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29830 }
29831
29832 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29833 - atomic_long_inc(&port->counter_group[CM_RECV].
29834 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29835 counter[attr_id - CM_ATTR_ID_OFFSET]);
29836
29837 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29838 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29839 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29840
29841 return sprintf(buf, "%ld\n",
29842 - atomic_long_read(&group->counter[cm_attr->index]));
29843 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29844 }
29845
29846 static const struct sysfs_ops cm_counter_ops = {
29847 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29848 index 176c8f9..2627b62 100644
29849 --- a/drivers/infiniband/core/fmr_pool.c
29850 +++ b/drivers/infiniband/core/fmr_pool.c
29851 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
29852
29853 struct task_struct *thread;
29854
29855 - atomic_t req_ser;
29856 - atomic_t flush_ser;
29857 + atomic_unchecked_t req_ser;
29858 + atomic_unchecked_t flush_ser;
29859
29860 wait_queue_head_t force_wait;
29861 };
29862 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29863 struct ib_fmr_pool *pool = pool_ptr;
29864
29865 do {
29866 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29867 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29868 ib_fmr_batch_release(pool);
29869
29870 - atomic_inc(&pool->flush_ser);
29871 + atomic_inc_unchecked(&pool->flush_ser);
29872 wake_up_interruptible(&pool->force_wait);
29873
29874 if (pool->flush_function)
29875 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29876 }
29877
29878 set_current_state(TASK_INTERRUPTIBLE);
29879 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29880 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29881 !kthread_should_stop())
29882 schedule();
29883 __set_current_state(TASK_RUNNING);
29884 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29885 pool->dirty_watermark = params->dirty_watermark;
29886 pool->dirty_len = 0;
29887 spin_lock_init(&pool->pool_lock);
29888 - atomic_set(&pool->req_ser, 0);
29889 - atomic_set(&pool->flush_ser, 0);
29890 + atomic_set_unchecked(&pool->req_ser, 0);
29891 + atomic_set_unchecked(&pool->flush_ser, 0);
29892 init_waitqueue_head(&pool->force_wait);
29893
29894 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29895 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29896 }
29897 spin_unlock_irq(&pool->pool_lock);
29898
29899 - serial = atomic_inc_return(&pool->req_ser);
29900 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29901 wake_up_process(pool->thread);
29902
29903 if (wait_event_interruptible(pool->force_wait,
29904 - atomic_read(&pool->flush_ser) - serial >= 0))
29905 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29906 return -EINTR;
29907
29908 return 0;
29909 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29910 } else {
29911 list_add_tail(&fmr->list, &pool->dirty_list);
29912 if (++pool->dirty_len >= pool->dirty_watermark) {
29913 - atomic_inc(&pool->req_ser);
29914 + atomic_inc_unchecked(&pool->req_ser);
29915 wake_up_process(pool->thread);
29916 }
29917 }
29918 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
29919 index 40c8353..946b0e4 100644
29920 --- a/drivers/infiniband/hw/cxgb4/mem.c
29921 +++ b/drivers/infiniband/hw/cxgb4/mem.c
29922 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29923 int err;
29924 struct fw_ri_tpte tpt;
29925 u32 stag_idx;
29926 - static atomic_t key;
29927 + static atomic_unchecked_t key;
29928
29929 if (c4iw_fatal_error(rdev))
29930 return -EIO;
29931 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29932 &rdev->resource.tpt_fifo_lock);
29933 if (!stag_idx)
29934 return -ENOMEM;
29935 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29936 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29937 }
29938 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29939 __func__, stag_state, type, pdid, stag_idx);
29940 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
29941 index 79b3dbc..96e5fcc 100644
29942 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
29943 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
29944 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29945 struct ib_atomic_eth *ateth;
29946 struct ipath_ack_entry *e;
29947 u64 vaddr;
29948 - atomic64_t *maddr;
29949 + atomic64_unchecked_t *maddr;
29950 u64 sdata;
29951 u32 rkey;
29952 u8 next;
29953 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29954 IB_ACCESS_REMOTE_ATOMIC)))
29955 goto nack_acc_unlck;
29956 /* Perform atomic OP and save result. */
29957 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29958 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29959 sdata = be64_to_cpu(ateth->swap_data);
29960 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29961 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
29962 - (u64) atomic64_add_return(sdata, maddr) - sdata :
29963 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29964 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29965 be64_to_cpu(ateth->compare_data),
29966 sdata);
29967 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
29968 index 1f95bba..9530f87 100644
29969 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
29970 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
29971 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
29972 unsigned long flags;
29973 struct ib_wc wc;
29974 u64 sdata;
29975 - atomic64_t *maddr;
29976 + atomic64_unchecked_t *maddr;
29977 enum ib_wc_status send_status;
29978
29979 /*
29980 @@ -382,11 +382,11 @@ again:
29981 IB_ACCESS_REMOTE_ATOMIC)))
29982 goto acc_err;
29983 /* Perform atomic OP and save result. */
29984 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29985 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29986 sdata = wqe->wr.wr.atomic.compare_add;
29987 *(u64 *) sqp->s_sge.sge.vaddr =
29988 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
29989 - (u64) atomic64_add_return(sdata, maddr) - sdata :
29990 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29991 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29992 sdata, wqe->wr.wr.atomic.swap);
29993 goto send_comp;
29994 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
29995 index 5965b3d..16817fb 100644
29996 --- a/drivers/infiniband/hw/nes/nes.c
29997 +++ b/drivers/infiniband/hw/nes/nes.c
29998 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
29999 LIST_HEAD(nes_adapter_list);
30000 static LIST_HEAD(nes_dev_list);
30001
30002 -atomic_t qps_destroyed;
30003 +atomic_unchecked_t qps_destroyed;
30004
30005 static unsigned int ee_flsh_adapter;
30006 static unsigned int sysfs_nonidx_addr;
30007 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30008 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30009 struct nes_adapter *nesadapter = nesdev->nesadapter;
30010
30011 - atomic_inc(&qps_destroyed);
30012 + atomic_inc_unchecked(&qps_destroyed);
30013
30014 /* Free the control structures */
30015
30016 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30017 index 568b4f1..5ea3eff 100644
30018 --- a/drivers/infiniband/hw/nes/nes.h
30019 +++ b/drivers/infiniband/hw/nes/nes.h
30020 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30021 extern unsigned int wqm_quanta;
30022 extern struct list_head nes_adapter_list;
30023
30024 -extern atomic_t cm_connects;
30025 -extern atomic_t cm_accepts;
30026 -extern atomic_t cm_disconnects;
30027 -extern atomic_t cm_closes;
30028 -extern atomic_t cm_connecteds;
30029 -extern atomic_t cm_connect_reqs;
30030 -extern atomic_t cm_rejects;
30031 -extern atomic_t mod_qp_timouts;
30032 -extern atomic_t qps_created;
30033 -extern atomic_t qps_destroyed;
30034 -extern atomic_t sw_qps_destroyed;
30035 +extern atomic_unchecked_t cm_connects;
30036 +extern atomic_unchecked_t cm_accepts;
30037 +extern atomic_unchecked_t cm_disconnects;
30038 +extern atomic_unchecked_t cm_closes;
30039 +extern atomic_unchecked_t cm_connecteds;
30040 +extern atomic_unchecked_t cm_connect_reqs;
30041 +extern atomic_unchecked_t cm_rejects;
30042 +extern atomic_unchecked_t mod_qp_timouts;
30043 +extern atomic_unchecked_t qps_created;
30044 +extern atomic_unchecked_t qps_destroyed;
30045 +extern atomic_unchecked_t sw_qps_destroyed;
30046 extern u32 mh_detected;
30047 extern u32 mh_pauses_sent;
30048 extern u32 cm_packets_sent;
30049 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30050 extern u32 cm_packets_received;
30051 extern u32 cm_packets_dropped;
30052 extern u32 cm_packets_retrans;
30053 -extern atomic_t cm_listens_created;
30054 -extern atomic_t cm_listens_destroyed;
30055 +extern atomic_unchecked_t cm_listens_created;
30056 +extern atomic_unchecked_t cm_listens_destroyed;
30057 extern u32 cm_backlog_drops;
30058 -extern atomic_t cm_loopbacks;
30059 -extern atomic_t cm_nodes_created;
30060 -extern atomic_t cm_nodes_destroyed;
30061 -extern atomic_t cm_accel_dropped_pkts;
30062 -extern atomic_t cm_resets_recvd;
30063 -extern atomic_t pau_qps_created;
30064 -extern atomic_t pau_qps_destroyed;
30065 +extern atomic_unchecked_t cm_loopbacks;
30066 +extern atomic_unchecked_t cm_nodes_created;
30067 +extern atomic_unchecked_t cm_nodes_destroyed;
30068 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30069 +extern atomic_unchecked_t cm_resets_recvd;
30070 +extern atomic_unchecked_t pau_qps_created;
30071 +extern atomic_unchecked_t pau_qps_destroyed;
30072
30073 extern u32 int_mod_timer_init;
30074 extern u32 int_mod_cq_depth_256;
30075 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30076 index 0a52d72..0642f36 100644
30077 --- a/drivers/infiniband/hw/nes/nes_cm.c
30078 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30079 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30080 u32 cm_packets_retrans;
30081 u32 cm_packets_created;
30082 u32 cm_packets_received;
30083 -atomic_t cm_listens_created;
30084 -atomic_t cm_listens_destroyed;
30085 +atomic_unchecked_t cm_listens_created;
30086 +atomic_unchecked_t cm_listens_destroyed;
30087 u32 cm_backlog_drops;
30088 -atomic_t cm_loopbacks;
30089 -atomic_t cm_nodes_created;
30090 -atomic_t cm_nodes_destroyed;
30091 -atomic_t cm_accel_dropped_pkts;
30092 -atomic_t cm_resets_recvd;
30093 +atomic_unchecked_t cm_loopbacks;
30094 +atomic_unchecked_t cm_nodes_created;
30095 +atomic_unchecked_t cm_nodes_destroyed;
30096 +atomic_unchecked_t cm_accel_dropped_pkts;
30097 +atomic_unchecked_t cm_resets_recvd;
30098
30099 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30100 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30101 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30102
30103 static struct nes_cm_core *g_cm_core;
30104
30105 -atomic_t cm_connects;
30106 -atomic_t cm_accepts;
30107 -atomic_t cm_disconnects;
30108 -atomic_t cm_closes;
30109 -atomic_t cm_connecteds;
30110 -atomic_t cm_connect_reqs;
30111 -atomic_t cm_rejects;
30112 +atomic_unchecked_t cm_connects;
30113 +atomic_unchecked_t cm_accepts;
30114 +atomic_unchecked_t cm_disconnects;
30115 +atomic_unchecked_t cm_closes;
30116 +atomic_unchecked_t cm_connecteds;
30117 +atomic_unchecked_t cm_connect_reqs;
30118 +atomic_unchecked_t cm_rejects;
30119
30120 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30121 {
30122 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30123 kfree(listener);
30124 listener = NULL;
30125 ret = 0;
30126 - atomic_inc(&cm_listens_destroyed);
30127 + atomic_inc_unchecked(&cm_listens_destroyed);
30128 } else {
30129 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30130 }
30131 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30132 cm_node->rem_mac);
30133
30134 add_hte_node(cm_core, cm_node);
30135 - atomic_inc(&cm_nodes_created);
30136 + atomic_inc_unchecked(&cm_nodes_created);
30137
30138 return cm_node;
30139 }
30140 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30141 }
30142
30143 atomic_dec(&cm_core->node_cnt);
30144 - atomic_inc(&cm_nodes_destroyed);
30145 + atomic_inc_unchecked(&cm_nodes_destroyed);
30146 nesqp = cm_node->nesqp;
30147 if (nesqp) {
30148 nesqp->cm_node = NULL;
30149 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30150
30151 static void drop_packet(struct sk_buff *skb)
30152 {
30153 - atomic_inc(&cm_accel_dropped_pkts);
30154 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30155 dev_kfree_skb_any(skb);
30156 }
30157
30158 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30159 {
30160
30161 int reset = 0; /* whether to send reset in case of err.. */
30162 - atomic_inc(&cm_resets_recvd);
30163 + atomic_inc_unchecked(&cm_resets_recvd);
30164 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30165 " refcnt=%d\n", cm_node, cm_node->state,
30166 atomic_read(&cm_node->ref_count));
30167 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30168 rem_ref_cm_node(cm_node->cm_core, cm_node);
30169 return NULL;
30170 }
30171 - atomic_inc(&cm_loopbacks);
30172 + atomic_inc_unchecked(&cm_loopbacks);
30173 loopbackremotenode->loopbackpartner = cm_node;
30174 loopbackremotenode->tcp_cntxt.rcv_wscale =
30175 NES_CM_DEFAULT_RCV_WND_SCALE;
30176 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30177 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30178 else {
30179 rem_ref_cm_node(cm_core, cm_node);
30180 - atomic_inc(&cm_accel_dropped_pkts);
30181 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30182 dev_kfree_skb_any(skb);
30183 }
30184 break;
30185 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30186
30187 if ((cm_id) && (cm_id->event_handler)) {
30188 if (issue_disconn) {
30189 - atomic_inc(&cm_disconnects);
30190 + atomic_inc_unchecked(&cm_disconnects);
30191 cm_event.event = IW_CM_EVENT_DISCONNECT;
30192 cm_event.status = disconn_status;
30193 cm_event.local_addr = cm_id->local_addr;
30194 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30195 }
30196
30197 if (issue_close) {
30198 - atomic_inc(&cm_closes);
30199 + atomic_inc_unchecked(&cm_closes);
30200 nes_disconnect(nesqp, 1);
30201
30202 cm_id->provider_data = nesqp;
30203 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30204
30205 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30206 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30207 - atomic_inc(&cm_accepts);
30208 + atomic_inc_unchecked(&cm_accepts);
30209
30210 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30211 netdev_refcnt_read(nesvnic->netdev));
30212 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30213 struct nes_cm_core *cm_core;
30214 u8 *start_buff;
30215
30216 - atomic_inc(&cm_rejects);
30217 + atomic_inc_unchecked(&cm_rejects);
30218 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30219 loopback = cm_node->loopbackpartner;
30220 cm_core = cm_node->cm_core;
30221 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30222 ntohl(cm_id->local_addr.sin_addr.s_addr),
30223 ntohs(cm_id->local_addr.sin_port));
30224
30225 - atomic_inc(&cm_connects);
30226 + atomic_inc_unchecked(&cm_connects);
30227 nesqp->active_conn = 1;
30228
30229 /* cache the cm_id in the qp */
30230 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30231 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30232 return err;
30233 }
30234 - atomic_inc(&cm_listens_created);
30235 + atomic_inc_unchecked(&cm_listens_created);
30236 }
30237
30238 cm_id->add_ref(cm_id);
30239 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30240
30241 if (nesqp->destroyed)
30242 return;
30243 - atomic_inc(&cm_connecteds);
30244 + atomic_inc_unchecked(&cm_connecteds);
30245 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30246 " local port 0x%04X. jiffies = %lu.\n",
30247 nesqp->hwqp.qp_id,
30248 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30249
30250 cm_id->add_ref(cm_id);
30251 ret = cm_id->event_handler(cm_id, &cm_event);
30252 - atomic_inc(&cm_closes);
30253 + atomic_inc_unchecked(&cm_closes);
30254 cm_event.event = IW_CM_EVENT_CLOSE;
30255 cm_event.status = 0;
30256 cm_event.provider_data = cm_id->provider_data;
30257 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30258 return;
30259 cm_id = cm_node->cm_id;
30260
30261 - atomic_inc(&cm_connect_reqs);
30262 + atomic_inc_unchecked(&cm_connect_reqs);
30263 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30264 cm_node, cm_id, jiffies);
30265
30266 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30267 return;
30268 cm_id = cm_node->cm_id;
30269
30270 - atomic_inc(&cm_connect_reqs);
30271 + atomic_inc_unchecked(&cm_connect_reqs);
30272 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30273 cm_node, cm_id, jiffies);
30274
30275 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30276 index b3b2a24..7bfaf1e 100644
30277 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30278 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30279 @@ -40,8 +40,8 @@
30280 #include "nes.h"
30281 #include "nes_mgt.h"
30282
30283 -atomic_t pau_qps_created;
30284 -atomic_t pau_qps_destroyed;
30285 +atomic_unchecked_t pau_qps_created;
30286 +atomic_unchecked_t pau_qps_destroyed;
30287
30288 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30289 {
30290 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30291 {
30292 struct sk_buff *skb;
30293 unsigned long flags;
30294 - atomic_inc(&pau_qps_destroyed);
30295 + atomic_inc_unchecked(&pau_qps_destroyed);
30296
30297 /* Free packets that have not yet been forwarded */
30298 /* Lock is acquired by skb_dequeue when removing the skb */
30299 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30300 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30301 skb_queue_head_init(&nesqp->pau_list);
30302 spin_lock_init(&nesqp->pau_lock);
30303 - atomic_inc(&pau_qps_created);
30304 + atomic_inc_unchecked(&pau_qps_created);
30305 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30306 }
30307
30308 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30309 index c00d2f3..8834298 100644
30310 --- a/drivers/infiniband/hw/nes/nes_nic.c
30311 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30312 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30313 target_stat_values[++index] = mh_detected;
30314 target_stat_values[++index] = mh_pauses_sent;
30315 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30316 - target_stat_values[++index] = atomic_read(&cm_connects);
30317 - target_stat_values[++index] = atomic_read(&cm_accepts);
30318 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30319 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30320 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30321 - target_stat_values[++index] = atomic_read(&cm_rejects);
30322 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30323 - target_stat_values[++index] = atomic_read(&qps_created);
30324 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30325 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30326 - target_stat_values[++index] = atomic_read(&cm_closes);
30327 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30328 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30329 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30330 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30331 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30332 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30333 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30334 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30335 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30336 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30337 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30338 target_stat_values[++index] = cm_packets_sent;
30339 target_stat_values[++index] = cm_packets_bounced;
30340 target_stat_values[++index] = cm_packets_created;
30341 target_stat_values[++index] = cm_packets_received;
30342 target_stat_values[++index] = cm_packets_dropped;
30343 target_stat_values[++index] = cm_packets_retrans;
30344 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30345 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30346 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30347 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30348 target_stat_values[++index] = cm_backlog_drops;
30349 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30350 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30351 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30352 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30353 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30354 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30355 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30356 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30357 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30358 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30359 target_stat_values[++index] = nesadapter->free_4kpbl;
30360 target_stat_values[++index] = nesadapter->free_256pbl;
30361 target_stat_values[++index] = int_mod_timer_init;
30362 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30363 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30364 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30365 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30366 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30367 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30368 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30369 }
30370
30371 /**
30372 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30373 index 5095bc4..41e8fff 100644
30374 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30375 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30376 @@ -46,9 +46,9 @@
30377
30378 #include <rdma/ib_umem.h>
30379
30380 -atomic_t mod_qp_timouts;
30381 -atomic_t qps_created;
30382 -atomic_t sw_qps_destroyed;
30383 +atomic_unchecked_t mod_qp_timouts;
30384 +atomic_unchecked_t qps_created;
30385 +atomic_unchecked_t sw_qps_destroyed;
30386
30387 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30388
30389 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30390 if (init_attr->create_flags)
30391 return ERR_PTR(-EINVAL);
30392
30393 - atomic_inc(&qps_created);
30394 + atomic_inc_unchecked(&qps_created);
30395 switch (init_attr->qp_type) {
30396 case IB_QPT_RC:
30397 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30398 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30399 struct iw_cm_event cm_event;
30400 int ret = 0;
30401
30402 - atomic_inc(&sw_qps_destroyed);
30403 + atomic_inc_unchecked(&sw_qps_destroyed);
30404 nesqp->destroyed = 1;
30405
30406 /* Blow away the connection if it exists. */
30407 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30408 index b881bdc..c2e360c 100644
30409 --- a/drivers/infiniband/hw/qib/qib.h
30410 +++ b/drivers/infiniband/hw/qib/qib.h
30411 @@ -51,6 +51,7 @@
30412 #include <linux/completion.h>
30413 #include <linux/kref.h>
30414 #include <linux/sched.h>
30415 +#include <linux/slab.h>
30416
30417 #include "qib_common.h"
30418 #include "qib_verbs.h"
30419 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30420 index c351aa4..e6967c2 100644
30421 --- a/drivers/input/gameport/gameport.c
30422 +++ b/drivers/input/gameport/gameport.c
30423 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30424 */
30425 static void gameport_init_port(struct gameport *gameport)
30426 {
30427 - static atomic_t gameport_no = ATOMIC_INIT(0);
30428 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30429
30430 __module_get(THIS_MODULE);
30431
30432 mutex_init(&gameport->drv_mutex);
30433 device_initialize(&gameport->dev);
30434 dev_set_name(&gameport->dev, "gameport%lu",
30435 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30436 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30437 gameport->dev.bus = &gameport_bus;
30438 gameport->dev.release = gameport_release_port;
30439 if (gameport->parent)
30440 diff --git a/drivers/input/input.c b/drivers/input/input.c
30441 index da38d97..2aa0b79 100644
30442 --- a/drivers/input/input.c
30443 +++ b/drivers/input/input.c
30444 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30445 */
30446 int input_register_device(struct input_dev *dev)
30447 {
30448 - static atomic_t input_no = ATOMIC_INIT(0);
30449 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30450 struct input_handler *handler;
30451 const char *path;
30452 int error;
30453 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30454 dev->setkeycode = input_default_setkeycode;
30455
30456 dev_set_name(&dev->dev, "input%ld",
30457 - (unsigned long) atomic_inc_return(&input_no) - 1);
30458 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30459
30460 error = device_add(&dev->dev);
30461 if (error)
30462 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30463 index b8d8611..7a4a04b 100644
30464 --- a/drivers/input/joystick/sidewinder.c
30465 +++ b/drivers/input/joystick/sidewinder.c
30466 @@ -30,6 +30,7 @@
30467 #include <linux/kernel.h>
30468 #include <linux/module.h>
30469 #include <linux/slab.h>
30470 +#include <linux/sched.h>
30471 #include <linux/init.h>
30472 #include <linux/input.h>
30473 #include <linux/gameport.h>
30474 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30475 index d728875..844c89b 100644
30476 --- a/drivers/input/joystick/xpad.c
30477 +++ b/drivers/input/joystick/xpad.c
30478 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30479
30480 static int xpad_led_probe(struct usb_xpad *xpad)
30481 {
30482 - static atomic_t led_seq = ATOMIC_INIT(0);
30483 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30484 long led_no;
30485 struct xpad_led *led;
30486 struct led_classdev *led_cdev;
30487 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30488 if (!led)
30489 return -ENOMEM;
30490
30491 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30492 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30493
30494 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30495 led->xpad = xpad;
30496 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30497 index 0110b5a..d3ad144 100644
30498 --- a/drivers/input/mousedev.c
30499 +++ b/drivers/input/mousedev.c
30500 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30501
30502 spin_unlock_irq(&client->packet_lock);
30503
30504 - if (copy_to_user(buffer, data, count))
30505 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30506 return -EFAULT;
30507
30508 return count;
30509 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30510 index ba70058..571d25d 100644
30511 --- a/drivers/input/serio/serio.c
30512 +++ b/drivers/input/serio/serio.c
30513 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30514 */
30515 static void serio_init_port(struct serio *serio)
30516 {
30517 - static atomic_t serio_no = ATOMIC_INIT(0);
30518 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30519
30520 __module_get(THIS_MODULE);
30521
30522 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30523 mutex_init(&serio->drv_mutex);
30524 device_initialize(&serio->dev);
30525 dev_set_name(&serio->dev, "serio%ld",
30526 - (long)atomic_inc_return(&serio_no) - 1);
30527 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30528 serio->dev.bus = &serio_bus;
30529 serio->dev.release = serio_release_port;
30530 serio->dev.groups = serio_device_attr_groups;
30531 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30532 index e44933d..9ba484a 100644
30533 --- a/drivers/isdn/capi/capi.c
30534 +++ b/drivers/isdn/capi/capi.c
30535 @@ -83,8 +83,8 @@ struct capiminor {
30536
30537 struct capi20_appl *ap;
30538 u32 ncci;
30539 - atomic_t datahandle;
30540 - atomic_t msgid;
30541 + atomic_unchecked_t datahandle;
30542 + atomic_unchecked_t msgid;
30543
30544 struct tty_port port;
30545 int ttyinstop;
30546 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30547 capimsg_setu16(s, 2, mp->ap->applid);
30548 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30549 capimsg_setu8 (s, 5, CAPI_RESP);
30550 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30551 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30552 capimsg_setu32(s, 8, mp->ncci);
30553 capimsg_setu16(s, 12, datahandle);
30554 }
30555 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30556 mp->outbytes -= len;
30557 spin_unlock_bh(&mp->outlock);
30558
30559 - datahandle = atomic_inc_return(&mp->datahandle);
30560 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30561 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30562 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30563 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30564 capimsg_setu16(skb->data, 2, mp->ap->applid);
30565 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30566 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30567 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30568 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30569 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30570 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30571 capimsg_setu16(skb->data, 16, len); /* Data length */
30572 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30573 index db621db..825ea1a 100644
30574 --- a/drivers/isdn/gigaset/common.c
30575 +++ b/drivers/isdn/gigaset/common.c
30576 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30577 cs->commands_pending = 0;
30578 cs->cur_at_seq = 0;
30579 cs->gotfwver = -1;
30580 - cs->open_count = 0;
30581 + local_set(&cs->open_count, 0);
30582 cs->dev = NULL;
30583 cs->tty = NULL;
30584 cs->tty_dev = NULL;
30585 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30586 index 212efaf..f187c6b 100644
30587 --- a/drivers/isdn/gigaset/gigaset.h
30588 +++ b/drivers/isdn/gigaset/gigaset.h
30589 @@ -35,6 +35,7 @@
30590 #include <linux/tty_driver.h>
30591 #include <linux/list.h>
30592 #include <linux/atomic.h>
30593 +#include <asm/local.h>
30594
30595 #define GIG_VERSION {0, 5, 0, 0}
30596 #define GIG_COMPAT {0, 4, 0, 0}
30597 @@ -433,7 +434,7 @@ struct cardstate {
30598 spinlock_t cmdlock;
30599 unsigned curlen, cmdbytes;
30600
30601 - unsigned open_count;
30602 + local_t open_count;
30603 struct tty_struct *tty;
30604 struct tasklet_struct if_wake_tasklet;
30605 unsigned control_state;
30606 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30607 index ee0a549..a7c9798 100644
30608 --- a/drivers/isdn/gigaset/interface.c
30609 +++ b/drivers/isdn/gigaset/interface.c
30610 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30611 }
30612 tty->driver_data = cs;
30613
30614 - ++cs->open_count;
30615 -
30616 - if (cs->open_count == 1) {
30617 + if (local_inc_return(&cs->open_count) == 1) {
30618 spin_lock_irqsave(&cs->lock, flags);
30619 cs->tty = tty;
30620 spin_unlock_irqrestore(&cs->lock, flags);
30621 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30622
30623 if (!cs->connected)
30624 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30625 - else if (!cs->open_count)
30626 + else if (!local_read(&cs->open_count))
30627 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30628 else {
30629 - if (!--cs->open_count) {
30630 + if (!local_dec_return(&cs->open_count)) {
30631 spin_lock_irqsave(&cs->lock, flags);
30632 cs->tty = NULL;
30633 spin_unlock_irqrestore(&cs->lock, flags);
30634 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30635 if (!cs->connected) {
30636 gig_dbg(DEBUG_IF, "not connected");
30637 retval = -ENODEV;
30638 - } else if (!cs->open_count)
30639 + } else if (!local_read(&cs->open_count))
30640 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30641 else {
30642 retval = 0;
30643 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30644 retval = -ENODEV;
30645 goto done;
30646 }
30647 - if (!cs->open_count) {
30648 + if (!local_read(&cs->open_count)) {
30649 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30650 retval = -ENODEV;
30651 goto done;
30652 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30653 if (!cs->connected) {
30654 gig_dbg(DEBUG_IF, "not connected");
30655 retval = -ENODEV;
30656 - } else if (!cs->open_count)
30657 + } else if (!local_read(&cs->open_count))
30658 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30659 else if (cs->mstate != MS_LOCKED) {
30660 dev_warn(cs->dev, "can't write to unlocked device\n");
30661 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30662
30663 if (!cs->connected)
30664 gig_dbg(DEBUG_IF, "not connected");
30665 - else if (!cs->open_count)
30666 + else if (!local_read(&cs->open_count))
30667 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30668 else if (cs->mstate != MS_LOCKED)
30669 dev_warn(cs->dev, "can't write to unlocked device\n");
30670 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30671
30672 if (!cs->connected)
30673 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30674 - else if (!cs->open_count)
30675 + else if (!local_read(&cs->open_count))
30676 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30677 else
30678 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30679 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30680
30681 if (!cs->connected)
30682 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30683 - else if (!cs->open_count)
30684 + else if (!local_read(&cs->open_count))
30685 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30686 else
30687 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30688 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30689 goto out;
30690 }
30691
30692 - if (!cs->open_count) {
30693 + if (!local_read(&cs->open_count)) {
30694 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30695 goto out;
30696 }
30697 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30698 index 2a57da59..e7a12ed 100644
30699 --- a/drivers/isdn/hardware/avm/b1.c
30700 +++ b/drivers/isdn/hardware/avm/b1.c
30701 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30702 }
30703 if (left) {
30704 if (t4file->user) {
30705 - if (copy_from_user(buf, dp, left))
30706 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30707 return -EFAULT;
30708 } else {
30709 memcpy(buf, dp, left);
30710 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30711 }
30712 if (left) {
30713 if (config->user) {
30714 - if (copy_from_user(buf, dp, left))
30715 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30716 return -EFAULT;
30717 } else {
30718 memcpy(buf, dp, left);
30719 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30720 index 85784a7..a19ca98 100644
30721 --- a/drivers/isdn/hardware/eicon/divasync.h
30722 +++ b/drivers/isdn/hardware/eicon/divasync.h
30723 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30724 } diva_didd_add_adapter_t;
30725 typedef struct _diva_didd_remove_adapter {
30726 IDI_CALL p_request;
30727 -} diva_didd_remove_adapter_t;
30728 +} __no_const diva_didd_remove_adapter_t;
30729 typedef struct _diva_didd_read_adapter_array {
30730 void * buffer;
30731 dword length;
30732 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30733 index a3bd163..8956575 100644
30734 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30735 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30736 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30737 typedef struct _diva_os_idi_adapter_interface {
30738 diva_init_card_proc_t cleanup_adapter_proc;
30739 diva_cmd_card_proc_t cmd_proc;
30740 -} diva_os_idi_adapter_interface_t;
30741 +} __no_const diva_os_idi_adapter_interface_t;
30742
30743 typedef struct _diva_os_xdi_adapter {
30744 struct list_head link;
30745 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30746 index 1f355bb..43f1fea 100644
30747 --- a/drivers/isdn/icn/icn.c
30748 +++ b/drivers/isdn/icn/icn.c
30749 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30750 if (count > len)
30751 count = len;
30752 if (user) {
30753 - if (copy_from_user(msg, buf, count))
30754 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30755 return -EFAULT;
30756 } else
30757 memcpy(msg, buf, count);
30758 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30759 index b5fdcb7..5b6c59f 100644
30760 --- a/drivers/lguest/core.c
30761 +++ b/drivers/lguest/core.c
30762 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
30763 * it's worked so far. The end address needs +1 because __get_vm_area
30764 * allocates an extra guard page, so we need space for that.
30765 */
30766 +
30767 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30768 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30769 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30770 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30771 +#else
30772 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30773 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30774 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30775 +#endif
30776 +
30777 if (!switcher_vma) {
30778 err = -ENOMEM;
30779 printk("lguest: could not map switcher pages high\n");
30780 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
30781 * Now the Switcher is mapped at the right address, we can't fail!
30782 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30783 */
30784 - memcpy(switcher_vma->addr, start_switcher_text,
30785 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30786 end_switcher_text - start_switcher_text);
30787
30788 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30789 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30790 index 65af42f..530c87a 100644
30791 --- a/drivers/lguest/x86/core.c
30792 +++ b/drivers/lguest/x86/core.c
30793 @@ -59,7 +59,7 @@ static struct {
30794 /* Offset from where switcher.S was compiled to where we've copied it */
30795 static unsigned long switcher_offset(void)
30796 {
30797 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30798 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30799 }
30800
30801 /* This cpu's struct lguest_pages. */
30802 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30803 * These copies are pretty cheap, so we do them unconditionally: */
30804 /* Save the current Host top-level page directory.
30805 */
30806 +
30807 +#ifdef CONFIG_PAX_PER_CPU_PGD
30808 + pages->state.host_cr3 = read_cr3();
30809 +#else
30810 pages->state.host_cr3 = __pa(current->mm->pgd);
30811 +#endif
30812 +
30813 /*
30814 * Set up the Guest's page tables to see this CPU's pages (and no
30815 * other CPU's pages).
30816 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30817 * compiled-in switcher code and the high-mapped copy we just made.
30818 */
30819 for (i = 0; i < IDT_ENTRIES; i++)
30820 - default_idt_entries[i] += switcher_offset();
30821 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30822
30823 /*
30824 * Set up the Switcher's per-cpu areas.
30825 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30826 * it will be undisturbed when we switch. To change %cs and jump we
30827 * need this structure to feed to Intel's "lcall" instruction.
30828 */
30829 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30830 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30831 lguest_entry.segment = LGUEST_CS;
30832
30833 /*
30834 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30835 index 40634b0..4f5855e 100644
30836 --- a/drivers/lguest/x86/switcher_32.S
30837 +++ b/drivers/lguest/x86/switcher_32.S
30838 @@ -87,6 +87,7 @@
30839 #include <asm/page.h>
30840 #include <asm/segment.h>
30841 #include <asm/lguest.h>
30842 +#include <asm/processor-flags.h>
30843
30844 // We mark the start of the code to copy
30845 // It's placed in .text tho it's never run here
30846 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30847 // Changes type when we load it: damn Intel!
30848 // For after we switch over our page tables
30849 // That entry will be read-only: we'd crash.
30850 +
30851 +#ifdef CONFIG_PAX_KERNEXEC
30852 + mov %cr0, %edx
30853 + xor $X86_CR0_WP, %edx
30854 + mov %edx, %cr0
30855 +#endif
30856 +
30857 movl $(GDT_ENTRY_TSS*8), %edx
30858 ltr %dx
30859
30860 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30861 // Let's clear it again for our return.
30862 // The GDT descriptor of the Host
30863 // Points to the table after two "size" bytes
30864 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30865 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30866 // Clear "used" from type field (byte 5, bit 2)
30867 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30868 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30869 +
30870 +#ifdef CONFIG_PAX_KERNEXEC
30871 + mov %cr0, %eax
30872 + xor $X86_CR0_WP, %eax
30873 + mov %eax, %cr0
30874 +#endif
30875
30876 // Once our page table's switched, the Guest is live!
30877 // The Host fades as we run this final step.
30878 @@ -295,13 +309,12 @@ deliver_to_host:
30879 // I consulted gcc, and it gave
30880 // These instructions, which I gladly credit:
30881 leal (%edx,%ebx,8), %eax
30882 - movzwl (%eax),%edx
30883 - movl 4(%eax), %eax
30884 - xorw %ax, %ax
30885 - orl %eax, %edx
30886 + movl 4(%eax), %edx
30887 + movw (%eax), %dx
30888 // Now the address of the handler's in %edx
30889 // We call it now: its "iret" drops us home.
30890 - jmp *%edx
30891 + ljmp $__KERNEL_CS, $1f
30892 +1: jmp *%edx
30893
30894 // Every interrupt can come to us here
30895 // But we must truly tell each apart.
30896 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30897 index 4daf9e5..b8d1d0f 100644
30898 --- a/drivers/macintosh/macio_asic.c
30899 +++ b/drivers/macintosh/macio_asic.c
30900 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30901 * MacIO is matched against any Apple ID, it's probe() function
30902 * will then decide wether it applies or not
30903 */
30904 -static const struct pci_device_id __devinitdata pci_ids [] = { {
30905 +static const struct pci_device_id __devinitconst pci_ids [] = { {
30906 .vendor = PCI_VENDOR_ID_APPLE,
30907 .device = PCI_ANY_ID,
30908 .subvendor = PCI_ANY_ID,
30909 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
30910 index 31c2dc2..a2de7a6 100644
30911 --- a/drivers/md/dm-ioctl.c
30912 +++ b/drivers/md/dm-ioctl.c
30913 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
30914 cmd == DM_LIST_VERSIONS_CMD)
30915 return 0;
30916
30917 - if ((cmd == DM_DEV_CREATE_CMD)) {
30918 + if (cmd == DM_DEV_CREATE_CMD) {
30919 if (!*param->name) {
30920 DMWARN("name not supplied when creating device");
30921 return -EINVAL;
30922 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
30923 index 9bfd057..01180bc 100644
30924 --- a/drivers/md/dm-raid1.c
30925 +++ b/drivers/md/dm-raid1.c
30926 @@ -40,7 +40,7 @@ enum dm_raid1_error {
30927
30928 struct mirror {
30929 struct mirror_set *ms;
30930 - atomic_t error_count;
30931 + atomic_unchecked_t error_count;
30932 unsigned long error_type;
30933 struct dm_dev *dev;
30934 sector_t offset;
30935 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
30936 struct mirror *m;
30937
30938 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30939 - if (!atomic_read(&m->error_count))
30940 + if (!atomic_read_unchecked(&m->error_count))
30941 return m;
30942
30943 return NULL;
30944 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
30945 * simple way to tell if a device has encountered
30946 * errors.
30947 */
30948 - atomic_inc(&m->error_count);
30949 + atomic_inc_unchecked(&m->error_count);
30950
30951 if (test_and_set_bit(error_type, &m->error_type))
30952 return;
30953 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
30954 struct mirror *m = get_default_mirror(ms);
30955
30956 do {
30957 - if (likely(!atomic_read(&m->error_count)))
30958 + if (likely(!atomic_read_unchecked(&m->error_count)))
30959 return m;
30960
30961 if (m-- == ms->mirror)
30962 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
30963 {
30964 struct mirror *default_mirror = get_default_mirror(m->ms);
30965
30966 - return !atomic_read(&default_mirror->error_count);
30967 + return !atomic_read_unchecked(&default_mirror->error_count);
30968 }
30969
30970 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30971 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
30972 */
30973 if (likely(region_in_sync(ms, region, 1)))
30974 m = choose_mirror(ms, bio->bi_sector);
30975 - else if (m && atomic_read(&m->error_count))
30976 + else if (m && atomic_read_unchecked(&m->error_count))
30977 m = NULL;
30978
30979 if (likely(m))
30980 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
30981 }
30982
30983 ms->mirror[mirror].ms = ms;
30984 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30985 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30986 ms->mirror[mirror].error_type = 0;
30987 ms->mirror[mirror].offset = offset;
30988
30989 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
30990 */
30991 static char device_status_char(struct mirror *m)
30992 {
30993 - if (!atomic_read(&(m->error_count)))
30994 + if (!atomic_read_unchecked(&(m->error_count)))
30995 return 'A';
30996
30997 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
30998 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
30999 index 3d80cf0..b77cc47 100644
31000 --- a/drivers/md/dm-stripe.c
31001 +++ b/drivers/md/dm-stripe.c
31002 @@ -20,7 +20,7 @@ struct stripe {
31003 struct dm_dev *dev;
31004 sector_t physical_start;
31005
31006 - atomic_t error_count;
31007 + atomic_unchecked_t error_count;
31008 };
31009
31010 struct stripe_c {
31011 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31012 kfree(sc);
31013 return r;
31014 }
31015 - atomic_set(&(sc->stripe[i].error_count), 0);
31016 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31017 }
31018
31019 ti->private = sc;
31020 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31021 DMEMIT("%d ", sc->stripes);
31022 for (i = 0; i < sc->stripes; i++) {
31023 DMEMIT("%s ", sc->stripe[i].dev->name);
31024 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31025 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31026 'D' : 'A';
31027 }
31028 buffer[i] = '\0';
31029 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31030 */
31031 for (i = 0; i < sc->stripes; i++)
31032 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31033 - atomic_inc(&(sc->stripe[i].error_count));
31034 - if (atomic_read(&(sc->stripe[i].error_count)) <
31035 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31036 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31037 DM_IO_ERROR_THRESHOLD)
31038 schedule_work(&sc->trigger_event);
31039 }
31040 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31041 index 8e91321..fd17aef 100644
31042 --- a/drivers/md/dm-table.c
31043 +++ b/drivers/md/dm-table.c
31044 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31045 if (!dev_size)
31046 return 0;
31047
31048 - if ((start >= dev_size) || (start + len > dev_size)) {
31049 + if ((start >= dev_size) || (len > dev_size - start)) {
31050 DMWARN("%s: %s too small for target: "
31051 "start=%llu, len=%llu, dev_size=%llu",
31052 dm_device_name(ti->table->md), bdevname(bdev, b),
31053 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31054 index 59c4f04..4c7b661 100644
31055 --- a/drivers/md/dm-thin-metadata.c
31056 +++ b/drivers/md/dm-thin-metadata.c
31057 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31058
31059 pmd->info.tm = tm;
31060 pmd->info.levels = 2;
31061 - pmd->info.value_type.context = pmd->data_sm;
31062 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31063 pmd->info.value_type.size = sizeof(__le64);
31064 pmd->info.value_type.inc = data_block_inc;
31065 pmd->info.value_type.dec = data_block_dec;
31066 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31067
31068 pmd->bl_info.tm = tm;
31069 pmd->bl_info.levels = 1;
31070 - pmd->bl_info.value_type.context = pmd->data_sm;
31071 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31072 pmd->bl_info.value_type.size = sizeof(__le64);
31073 pmd->bl_info.value_type.inc = data_block_inc;
31074 pmd->bl_info.value_type.dec = data_block_dec;
31075 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31076 index 4720f68..78d1df7 100644
31077 --- a/drivers/md/dm.c
31078 +++ b/drivers/md/dm.c
31079 @@ -177,9 +177,9 @@ struct mapped_device {
31080 /*
31081 * Event handling.
31082 */
31083 - atomic_t event_nr;
31084 + atomic_unchecked_t event_nr;
31085 wait_queue_head_t eventq;
31086 - atomic_t uevent_seq;
31087 + atomic_unchecked_t uevent_seq;
31088 struct list_head uevent_list;
31089 spinlock_t uevent_lock; /* Protect access to uevent_list */
31090
31091 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31092 rwlock_init(&md->map_lock);
31093 atomic_set(&md->holders, 1);
31094 atomic_set(&md->open_count, 0);
31095 - atomic_set(&md->event_nr, 0);
31096 - atomic_set(&md->uevent_seq, 0);
31097 + atomic_set_unchecked(&md->event_nr, 0);
31098 + atomic_set_unchecked(&md->uevent_seq, 0);
31099 INIT_LIST_HEAD(&md->uevent_list);
31100 spin_lock_init(&md->uevent_lock);
31101
31102 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31103
31104 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31105
31106 - atomic_inc(&md->event_nr);
31107 + atomic_inc_unchecked(&md->event_nr);
31108 wake_up(&md->eventq);
31109 }
31110
31111 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31112
31113 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31114 {
31115 - return atomic_add_return(1, &md->uevent_seq);
31116 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31117 }
31118
31119 uint32_t dm_get_event_nr(struct mapped_device *md)
31120 {
31121 - return atomic_read(&md->event_nr);
31122 + return atomic_read_unchecked(&md->event_nr);
31123 }
31124
31125 int dm_wait_event(struct mapped_device *md, int event_nr)
31126 {
31127 return wait_event_interruptible(md->eventq,
31128 - (event_nr != atomic_read(&md->event_nr)));
31129 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31130 }
31131
31132 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31133 diff --git a/drivers/md/md.c b/drivers/md/md.c
31134 index f47f1f8..b7f559e 100644
31135 --- a/drivers/md/md.c
31136 +++ b/drivers/md/md.c
31137 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31138 * start build, activate spare
31139 */
31140 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31141 -static atomic_t md_event_count;
31142 +static atomic_unchecked_t md_event_count;
31143 void md_new_event(struct mddev *mddev)
31144 {
31145 - atomic_inc(&md_event_count);
31146 + atomic_inc_unchecked(&md_event_count);
31147 wake_up(&md_event_waiters);
31148 }
31149 EXPORT_SYMBOL_GPL(md_new_event);
31150 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31151 */
31152 static void md_new_event_inintr(struct mddev *mddev)
31153 {
31154 - atomic_inc(&md_event_count);
31155 + atomic_inc_unchecked(&md_event_count);
31156 wake_up(&md_event_waiters);
31157 }
31158
31159 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31160
31161 rdev->preferred_minor = 0xffff;
31162 rdev->data_offset = le64_to_cpu(sb->data_offset);
31163 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31164 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31165
31166 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31167 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31168 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31169 else
31170 sb->resync_offset = cpu_to_le64(0);
31171
31172 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31173 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31174
31175 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31176 sb->size = cpu_to_le64(mddev->dev_sectors);
31177 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31178 static ssize_t
31179 errors_show(struct md_rdev *rdev, char *page)
31180 {
31181 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31182 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31183 }
31184
31185 static ssize_t
31186 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31187 char *e;
31188 unsigned long n = simple_strtoul(buf, &e, 10);
31189 if (*buf && (*e == 0 || *e == '\n')) {
31190 - atomic_set(&rdev->corrected_errors, n);
31191 + atomic_set_unchecked(&rdev->corrected_errors, n);
31192 return len;
31193 }
31194 return -EINVAL;
31195 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31196 rdev->sb_loaded = 0;
31197 rdev->bb_page = NULL;
31198 atomic_set(&rdev->nr_pending, 0);
31199 - atomic_set(&rdev->read_errors, 0);
31200 - atomic_set(&rdev->corrected_errors, 0);
31201 + atomic_set_unchecked(&rdev->read_errors, 0);
31202 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31203
31204 INIT_LIST_HEAD(&rdev->same_set);
31205 init_waitqueue_head(&rdev->blocked_wait);
31206 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31207
31208 spin_unlock(&pers_lock);
31209 seq_printf(seq, "\n");
31210 - seq->poll_event = atomic_read(&md_event_count);
31211 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31212 return 0;
31213 }
31214 if (v == (void*)2) {
31215 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31216 chunk_kb ? "KB" : "B");
31217 if (bitmap->file) {
31218 seq_printf(seq, ", file: ");
31219 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31220 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31221 }
31222
31223 seq_printf(seq, "\n");
31224 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31225 return error;
31226
31227 seq = file->private_data;
31228 - seq->poll_event = atomic_read(&md_event_count);
31229 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31230 return error;
31231 }
31232
31233 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31234 /* always allow read */
31235 mask = POLLIN | POLLRDNORM;
31236
31237 - if (seq->poll_event != atomic_read(&md_event_count))
31238 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31239 mask |= POLLERR | POLLPRI;
31240 return mask;
31241 }
31242 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31243 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31244 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31245 (int)part_stat_read(&disk->part0, sectors[1]) -
31246 - atomic_read(&disk->sync_io);
31247 + atomic_read_unchecked(&disk->sync_io);
31248 /* sync IO will cause sync_io to increase before the disk_stats
31249 * as sync_io is counted when a request starts, and
31250 * disk_stats is counted when it completes.
31251 diff --git a/drivers/md/md.h b/drivers/md/md.h
31252 index cf742d9..7c7c745 100644
31253 --- a/drivers/md/md.h
31254 +++ b/drivers/md/md.h
31255 @@ -120,13 +120,13 @@ struct md_rdev {
31256 * only maintained for arrays that
31257 * support hot removal
31258 */
31259 - atomic_t read_errors; /* number of consecutive read errors that
31260 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31261 * we have tried to ignore.
31262 */
31263 struct timespec last_read_error; /* monotonic time since our
31264 * last read error
31265 */
31266 - atomic_t corrected_errors; /* number of corrected read errors,
31267 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31268 * for reporting to userspace and storing
31269 * in superblock.
31270 */
31271 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31272
31273 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31274 {
31275 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31276 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31277 }
31278
31279 struct md_personality
31280 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31281 index 50ed53b..4f29d7d 100644
31282 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31283 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31284 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31285 /*----------------------------------------------------------------*/
31286
31287 struct sm_checker {
31288 - struct dm_space_map sm;
31289 + dm_space_map_no_const sm;
31290
31291 struct count_array old_counts;
31292 struct count_array counts;
31293 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31294 index fc469ba..2d91555 100644
31295 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31296 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31297 @@ -23,7 +23,7 @@
31298 * Space map interface.
31299 */
31300 struct sm_disk {
31301 - struct dm_space_map sm;
31302 + dm_space_map_no_const sm;
31303
31304 struct ll_disk ll;
31305 struct ll_disk old_ll;
31306 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31307 index e89ae5e..062e4c2 100644
31308 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31309 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31310 @@ -43,7 +43,7 @@ struct block_op {
31311 };
31312
31313 struct sm_metadata {
31314 - struct dm_space_map sm;
31315 + dm_space_map_no_const sm;
31316
31317 struct ll_disk ll;
31318 struct ll_disk old_ll;
31319 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31320 index 1cbfc6b..56e1dbb 100644
31321 --- a/drivers/md/persistent-data/dm-space-map.h
31322 +++ b/drivers/md/persistent-data/dm-space-map.h
31323 @@ -60,6 +60,7 @@ struct dm_space_map {
31324 int (*root_size)(struct dm_space_map *sm, size_t *result);
31325 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31326 };
31327 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31328
31329 /*----------------------------------------------------------------*/
31330
31331 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31332 index 7d9e071..015b1d5 100644
31333 --- a/drivers/md/raid1.c
31334 +++ b/drivers/md/raid1.c
31335 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31336 if (r1_sync_page_io(rdev, sect, s,
31337 bio->bi_io_vec[idx].bv_page,
31338 READ) != 0)
31339 - atomic_add(s, &rdev->corrected_errors);
31340 + atomic_add_unchecked(s, &rdev->corrected_errors);
31341 }
31342 sectors -= s;
31343 sect += s;
31344 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31345 test_bit(In_sync, &rdev->flags)) {
31346 if (r1_sync_page_io(rdev, sect, s,
31347 conf->tmppage, READ)) {
31348 - atomic_add(s, &rdev->corrected_errors);
31349 + atomic_add_unchecked(s, &rdev->corrected_errors);
31350 printk(KERN_INFO
31351 "md/raid1:%s: read error corrected "
31352 "(%d sectors at %llu on %s)\n",
31353 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31354 index 685ddf3..955b087 100644
31355 --- a/drivers/md/raid10.c
31356 +++ b/drivers/md/raid10.c
31357 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31358 /* The write handler will notice the lack of
31359 * R10BIO_Uptodate and record any errors etc
31360 */
31361 - atomic_add(r10_bio->sectors,
31362 + atomic_add_unchecked(r10_bio->sectors,
31363 &conf->mirrors[d].rdev->corrected_errors);
31364
31365 /* for reconstruct, we always reschedule after a read.
31366 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31367 {
31368 struct timespec cur_time_mon;
31369 unsigned long hours_since_last;
31370 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31371 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31372
31373 ktime_get_ts(&cur_time_mon);
31374
31375 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31376 * overflowing the shift of read_errors by hours_since_last.
31377 */
31378 if (hours_since_last >= 8 * sizeof(read_errors))
31379 - atomic_set(&rdev->read_errors, 0);
31380 + atomic_set_unchecked(&rdev->read_errors, 0);
31381 else
31382 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31383 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31384 }
31385
31386 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31387 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31388 return;
31389
31390 check_decay_read_errors(mddev, rdev);
31391 - atomic_inc(&rdev->read_errors);
31392 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31393 + atomic_inc_unchecked(&rdev->read_errors);
31394 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31395 char b[BDEVNAME_SIZE];
31396 bdevname(rdev->bdev, b);
31397
31398 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31399 "md/raid10:%s: %s: Raid device exceeded "
31400 "read_error threshold [cur %d:max %d]\n",
31401 mdname(mddev), b,
31402 - atomic_read(&rdev->read_errors), max_read_errors);
31403 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31404 printk(KERN_NOTICE
31405 "md/raid10:%s: %s: Failing raid device\n",
31406 mdname(mddev), b);
31407 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31408 (unsigned long long)(
31409 sect + rdev->data_offset),
31410 bdevname(rdev->bdev, b));
31411 - atomic_add(s, &rdev->corrected_errors);
31412 + atomic_add_unchecked(s, &rdev->corrected_errors);
31413 }
31414
31415 rdev_dec_pending(rdev, mddev);
31416 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31417 index 858fdbb..b2dac95 100644
31418 --- a/drivers/md/raid5.c
31419 +++ b/drivers/md/raid5.c
31420 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31421 (unsigned long long)(sh->sector
31422 + rdev->data_offset),
31423 bdevname(rdev->bdev, b));
31424 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31425 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31426 clear_bit(R5_ReadError, &sh->dev[i].flags);
31427 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31428 }
31429 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31430 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31431 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31432 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31433 } else {
31434 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31435 int retry = 0;
31436 rdev = conf->disks[i].rdev;
31437
31438 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31439 - atomic_inc(&rdev->read_errors);
31440 + atomic_inc_unchecked(&rdev->read_errors);
31441 if (conf->mddev->degraded >= conf->max_degraded)
31442 printk_ratelimited(
31443 KERN_WARNING
31444 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31445 (unsigned long long)(sh->sector
31446 + rdev->data_offset),
31447 bdn);
31448 - else if (atomic_read(&rdev->read_errors)
31449 + else if (atomic_read_unchecked(&rdev->read_errors)
31450 > conf->max_nr_stripes)
31451 printk(KERN_WARNING
31452 "md/raid:%s: Too many read errors, failing device %s.\n",
31453 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31454 index ba9a643..e474ab5 100644
31455 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31456 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31457 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31458 .subvendor = _subvend, .subdevice = _subdev, \
31459 .driver_data = (unsigned long)&_driverdata }
31460
31461 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31462 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31463 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31464 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31465 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31466 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31467 index a7d876f..8c21b61 100644
31468 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31469 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31470 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31471 union {
31472 dmx_ts_cb ts;
31473 dmx_section_cb sec;
31474 - } cb;
31475 + } __no_const cb;
31476
31477 struct dvb_demux *demux;
31478 void *priv;
31479 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31480 index f732877..d38c35a 100644
31481 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31482 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31483 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31484 const struct dvb_device *template, void *priv, int type)
31485 {
31486 struct dvb_device *dvbdev;
31487 - struct file_operations *dvbdevfops;
31488 + file_operations_no_const *dvbdevfops;
31489 struct device *clsdev;
31490 int minor;
31491 int id;
31492 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31493 index 9f2a02c..5920f88 100644
31494 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31495 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31496 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31497 struct dib0700_adapter_state {
31498 int (*set_param_save) (struct dvb_frontend *,
31499 struct dvb_frontend_parameters *);
31500 -};
31501 +} __no_const;
31502
31503 static int dib7070_set_param_override(struct dvb_frontend *fe,
31504 struct dvb_frontend_parameters *fep)
31505 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31506 index f103ec1..5e8968b 100644
31507 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31508 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31509 @@ -95,7 +95,7 @@ struct su3000_state {
31510
31511 struct s6x0_state {
31512 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31513 -};
31514 +} __no_const;
31515
31516 /* debug */
31517 static int dvb_usb_dw2102_debug;
31518 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31519 index 404f63a..4796533 100644
31520 --- a/drivers/media/dvb/frontends/dib3000.h
31521 +++ b/drivers/media/dvb/frontends/dib3000.h
31522 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31523 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31524 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31525 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31526 -};
31527 +} __no_const;
31528
31529 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31530 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31531 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31532 index 90bf573..e8463da 100644
31533 --- a/drivers/media/dvb/frontends/ds3000.c
31534 +++ b/drivers/media/dvb/frontends/ds3000.c
31535 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31536
31537 for (i = 0; i < 30 ; i++) {
31538 ds3000_read_status(fe, &status);
31539 - if (status && FE_HAS_LOCK)
31540 + if (status & FE_HAS_LOCK)
31541 break;
31542
31543 msleep(10);
31544 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31545 index 0564192..75b16f5 100644
31546 --- a/drivers/media/dvb/ngene/ngene-cards.c
31547 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31548 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31549
31550 /****************************************************************************/
31551
31552 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31553 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31554 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31555 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31556 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31557 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31558 index 16a089f..ab1667d 100644
31559 --- a/drivers/media/radio/radio-cadet.c
31560 +++ b/drivers/media/radio/radio-cadet.c
31561 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31562 unsigned char readbuf[RDS_BUFFER];
31563 int i = 0;
31564
31565 + if (count > RDS_BUFFER)
31566 + return -EFAULT;
31567 mutex_lock(&dev->lock);
31568 if (dev->rdsstat == 0) {
31569 dev->rdsstat = 1;
31570 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31571 index 61287fc..8b08712 100644
31572 --- a/drivers/media/rc/redrat3.c
31573 +++ b/drivers/media/rc/redrat3.c
31574 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31575 return carrier;
31576 }
31577
31578 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31579 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31580 {
31581 struct redrat3_dev *rr3 = rcdev->priv;
31582 struct device *dev = rr3->dev;
31583 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31584 index 9cde353..8c6a1c3 100644
31585 --- a/drivers/media/video/au0828/au0828.h
31586 +++ b/drivers/media/video/au0828/au0828.h
31587 @@ -191,7 +191,7 @@ struct au0828_dev {
31588
31589 /* I2C */
31590 struct i2c_adapter i2c_adap;
31591 - struct i2c_algorithm i2c_algo;
31592 + i2c_algorithm_no_const i2c_algo;
31593 struct i2c_client i2c_client;
31594 u32 i2c_rc;
31595
31596 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31597 index 68d1240..46b32eb 100644
31598 --- a/drivers/media/video/cx88/cx88-alsa.c
31599 +++ b/drivers/media/video/cx88/cx88-alsa.c
31600 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31601 * Only boards with eeprom and byte 1 at eeprom=1 have it
31602 */
31603
31604 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31605 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31606 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31607 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31608 {0, }
31609 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31610 index 305e6aa..0143317 100644
31611 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31612 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31613 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31614
31615 /* I2C stuff */
31616 struct i2c_adapter i2c_adap;
31617 - struct i2c_algorithm i2c_algo;
31618 + i2c_algorithm_no_const i2c_algo;
31619 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31620 int i2c_cx25840_hack_state;
31621 int i2c_linked;
31622 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31623 index a0895bf..b7ebb1b 100644
31624 --- a/drivers/media/video/timblogiw.c
31625 +++ b/drivers/media/video/timblogiw.c
31626 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31627
31628 /* Platform device functions */
31629
31630 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31631 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31632 .vidioc_querycap = timblogiw_querycap,
31633 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31634 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31635 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31636 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31637 };
31638
31639 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31640 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31641 .owner = THIS_MODULE,
31642 .open = timblogiw_open,
31643 .release = timblogiw_close,
31644 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31645 index e9c6a60..daf6a33 100644
31646 --- a/drivers/message/fusion/mptbase.c
31647 +++ b/drivers/message/fusion/mptbase.c
31648 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31649 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31650 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31651
31652 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31653 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31654 +#else
31655 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31656 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31657 +#endif
31658 +
31659 /*
31660 * Rounding UP to nearest 4-kB boundary here...
31661 */
31662 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31663 index 9d95042..b808101 100644
31664 --- a/drivers/message/fusion/mptsas.c
31665 +++ b/drivers/message/fusion/mptsas.c
31666 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31667 return 0;
31668 }
31669
31670 +static inline void
31671 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31672 +{
31673 + if (phy_info->port_details) {
31674 + phy_info->port_details->rphy = rphy;
31675 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31676 + ioc->name, rphy));
31677 + }
31678 +
31679 + if (rphy) {
31680 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31681 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31682 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31683 + ioc->name, rphy, rphy->dev.release));
31684 + }
31685 +}
31686 +
31687 /* no mutex */
31688 static void
31689 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31690 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31691 return NULL;
31692 }
31693
31694 -static inline void
31695 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31696 -{
31697 - if (phy_info->port_details) {
31698 - phy_info->port_details->rphy = rphy;
31699 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31700 - ioc->name, rphy));
31701 - }
31702 -
31703 - if (rphy) {
31704 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31705 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31706 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31707 - ioc->name, rphy, rphy->dev.release));
31708 - }
31709 -}
31710 -
31711 static inline struct sas_port *
31712 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31713 {
31714 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31715 index 0c3ced7..1fe34ec 100644
31716 --- a/drivers/message/fusion/mptscsih.c
31717 +++ b/drivers/message/fusion/mptscsih.c
31718 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31719
31720 h = shost_priv(SChost);
31721
31722 - if (h) {
31723 - if (h->info_kbuf == NULL)
31724 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31725 - return h->info_kbuf;
31726 - h->info_kbuf[0] = '\0';
31727 + if (!h)
31728 + return NULL;
31729
31730 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31731 - h->info_kbuf[size-1] = '\0';
31732 - }
31733 + if (h->info_kbuf == NULL)
31734 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31735 + return h->info_kbuf;
31736 + h->info_kbuf[0] = '\0';
31737 +
31738 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31739 + h->info_kbuf[size-1] = '\0';
31740
31741 return h->info_kbuf;
31742 }
31743 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31744 index 07dbeaf..5533142 100644
31745 --- a/drivers/message/i2o/i2o_proc.c
31746 +++ b/drivers/message/i2o/i2o_proc.c
31747 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31748 "Array Controller Device"
31749 };
31750
31751 -static char *chtostr(u8 * chars, int n)
31752 -{
31753 - char tmp[256];
31754 - tmp[0] = 0;
31755 - return strncat(tmp, (char *)chars, n);
31756 -}
31757 -
31758 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31759 char *group)
31760 {
31761 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31762
31763 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31764 seq_printf(seq, "%-#8x", ddm_table.module_id);
31765 - seq_printf(seq, "%-29s",
31766 - chtostr(ddm_table.module_name_version, 28));
31767 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31768 seq_printf(seq, "%9d ", ddm_table.data_size);
31769 seq_printf(seq, "%8d", ddm_table.code_size);
31770
31771 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31772
31773 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31774 seq_printf(seq, "%-#8x", dst->module_id);
31775 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31776 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31777 + seq_printf(seq, "%-.28s", dst->module_name_version);
31778 + seq_printf(seq, "%-.8s", dst->date);
31779 seq_printf(seq, "%8d ", dst->module_size);
31780 seq_printf(seq, "%8d ", dst->mpb_size);
31781 seq_printf(seq, "0x%04x", dst->module_flags);
31782 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31783 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31784 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31785 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31786 - seq_printf(seq, "Vendor info : %s\n",
31787 - chtostr((u8 *) (work32 + 2), 16));
31788 - seq_printf(seq, "Product info : %s\n",
31789 - chtostr((u8 *) (work32 + 6), 16));
31790 - seq_printf(seq, "Description : %s\n",
31791 - chtostr((u8 *) (work32 + 10), 16));
31792 - seq_printf(seq, "Product rev. : %s\n",
31793 - chtostr((u8 *) (work32 + 14), 8));
31794 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31795 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31796 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31797 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31798
31799 seq_printf(seq, "Serial number : ");
31800 print_serial_number(seq, (u8 *) (work32 + 16),
31801 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31802 }
31803
31804 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31805 - seq_printf(seq, "Module name : %s\n",
31806 - chtostr(result.module_name, 24));
31807 - seq_printf(seq, "Module revision : %s\n",
31808 - chtostr(result.module_rev, 8));
31809 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31810 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31811
31812 seq_printf(seq, "Serial number : ");
31813 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31814 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31815 return 0;
31816 }
31817
31818 - seq_printf(seq, "Device name : %s\n",
31819 - chtostr(result.device_name, 64));
31820 - seq_printf(seq, "Service name : %s\n",
31821 - chtostr(result.service_name, 64));
31822 - seq_printf(seq, "Physical name : %s\n",
31823 - chtostr(result.physical_location, 64));
31824 - seq_printf(seq, "Instance number : %s\n",
31825 - chtostr(result.instance_number, 4));
31826 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31827 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31828 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31829 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31830
31831 return 0;
31832 }
31833 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31834 index a8c08f3..155fe3d 100644
31835 --- a/drivers/message/i2o/iop.c
31836 +++ b/drivers/message/i2o/iop.c
31837 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31838
31839 spin_lock_irqsave(&c->context_list_lock, flags);
31840
31841 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31842 - atomic_inc(&c->context_list_counter);
31843 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31844 + atomic_inc_unchecked(&c->context_list_counter);
31845
31846 - entry->context = atomic_read(&c->context_list_counter);
31847 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31848
31849 list_add(&entry->list, &c->context_list);
31850
31851 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31852
31853 #if BITS_PER_LONG == 64
31854 spin_lock_init(&c->context_list_lock);
31855 - atomic_set(&c->context_list_counter, 0);
31856 + atomic_set_unchecked(&c->context_list_counter, 0);
31857 INIT_LIST_HEAD(&c->context_list);
31858 #endif
31859
31860 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31861 index 7ce65f4..e66e9bc 100644
31862 --- a/drivers/mfd/abx500-core.c
31863 +++ b/drivers/mfd/abx500-core.c
31864 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31865
31866 struct abx500_device_entry {
31867 struct list_head list;
31868 - struct abx500_ops ops;
31869 + abx500_ops_no_const ops;
31870 struct device *dev;
31871 };
31872
31873 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31874 index 5c2a06a..8fa077c 100644
31875 --- a/drivers/mfd/janz-cmodio.c
31876 +++ b/drivers/mfd/janz-cmodio.c
31877 @@ -13,6 +13,7 @@
31878
31879 #include <linux/kernel.h>
31880 #include <linux/module.h>
31881 +#include <linux/slab.h>
31882 #include <linux/init.h>
31883 #include <linux/pci.h>
31884 #include <linux/interrupt.h>
31885 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31886 index 29d12a7..f900ba4 100644
31887 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
31888 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31889 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31890 * the lid is closed. This leads to interrupts as soon as a little move
31891 * is done.
31892 */
31893 - atomic_inc(&lis3->count);
31894 + atomic_inc_unchecked(&lis3->count);
31895
31896 wake_up_interruptible(&lis3->misc_wait);
31897 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31898 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31899 if (lis3->pm_dev)
31900 pm_runtime_get_sync(lis3->pm_dev);
31901
31902 - atomic_set(&lis3->count, 0);
31903 + atomic_set_unchecked(&lis3->count, 0);
31904 return 0;
31905 }
31906
31907 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
31908 add_wait_queue(&lis3->misc_wait, &wait);
31909 while (true) {
31910 set_current_state(TASK_INTERRUPTIBLE);
31911 - data = atomic_xchg(&lis3->count, 0);
31912 + data = atomic_xchg_unchecked(&lis3->count, 0);
31913 if (data)
31914 break;
31915
31916 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31917 struct lis3lv02d, miscdev);
31918
31919 poll_wait(file, &lis3->misc_wait, wait);
31920 - if (atomic_read(&lis3->count))
31921 + if (atomic_read_unchecked(&lis3->count))
31922 return POLLIN | POLLRDNORM;
31923 return 0;
31924 }
31925 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
31926 index 2b1482a..5d33616 100644
31927 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
31928 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
31929 @@ -266,7 +266,7 @@ struct lis3lv02d {
31930 struct input_polled_dev *idev; /* input device */
31931 struct platform_device *pdev; /* platform device */
31932 struct regulator_bulk_data regulators[2];
31933 - atomic_t count; /* interrupt count after last read */
31934 + atomic_unchecked_t count; /* interrupt count after last read */
31935 union axis_conversion ac; /* hw -> logical axis */
31936 int mapped_btns[3];
31937
31938 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
31939 index 2f30bad..c4c13d0 100644
31940 --- a/drivers/misc/sgi-gru/gruhandles.c
31941 +++ b/drivers/misc/sgi-gru/gruhandles.c
31942 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31943 unsigned long nsec;
31944
31945 nsec = CLKS2NSEC(clks);
31946 - atomic_long_inc(&mcs_op_statistics[op].count);
31947 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
31948 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31949 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
31950 if (mcs_op_statistics[op].max < nsec)
31951 mcs_op_statistics[op].max = nsec;
31952 }
31953 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
31954 index 7768b87..f8aac38 100644
31955 --- a/drivers/misc/sgi-gru/gruprocfs.c
31956 +++ b/drivers/misc/sgi-gru/gruprocfs.c
31957 @@ -32,9 +32,9 @@
31958
31959 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31960
31961 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31962 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31963 {
31964 - unsigned long val = atomic_long_read(v);
31965 + unsigned long val = atomic_long_read_unchecked(v);
31966
31967 seq_printf(s, "%16lu %s\n", val, id);
31968 }
31969 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
31970
31971 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
31972 for (op = 0; op < mcsop_last; op++) {
31973 - count = atomic_long_read(&mcs_op_statistics[op].count);
31974 - total = atomic_long_read(&mcs_op_statistics[op].total);
31975 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31976 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31977 max = mcs_op_statistics[op].max;
31978 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31979 count ? total / count : 0, max);
31980 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
31981 index 5c3ce24..4915ccb 100644
31982 --- a/drivers/misc/sgi-gru/grutables.h
31983 +++ b/drivers/misc/sgi-gru/grutables.h
31984 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
31985 * GRU statistics.
31986 */
31987 struct gru_stats_s {
31988 - atomic_long_t vdata_alloc;
31989 - atomic_long_t vdata_free;
31990 - atomic_long_t gts_alloc;
31991 - atomic_long_t gts_free;
31992 - atomic_long_t gms_alloc;
31993 - atomic_long_t gms_free;
31994 - atomic_long_t gts_double_allocate;
31995 - atomic_long_t assign_context;
31996 - atomic_long_t assign_context_failed;
31997 - atomic_long_t free_context;
31998 - atomic_long_t load_user_context;
31999 - atomic_long_t load_kernel_context;
32000 - atomic_long_t lock_kernel_context;
32001 - atomic_long_t unlock_kernel_context;
32002 - atomic_long_t steal_user_context;
32003 - atomic_long_t steal_kernel_context;
32004 - atomic_long_t steal_context_failed;
32005 - atomic_long_t nopfn;
32006 - atomic_long_t asid_new;
32007 - atomic_long_t asid_next;
32008 - atomic_long_t asid_wrap;
32009 - atomic_long_t asid_reuse;
32010 - atomic_long_t intr;
32011 - atomic_long_t intr_cbr;
32012 - atomic_long_t intr_tfh;
32013 - atomic_long_t intr_spurious;
32014 - atomic_long_t intr_mm_lock_failed;
32015 - atomic_long_t call_os;
32016 - atomic_long_t call_os_wait_queue;
32017 - atomic_long_t user_flush_tlb;
32018 - atomic_long_t user_unload_context;
32019 - atomic_long_t user_exception;
32020 - atomic_long_t set_context_option;
32021 - atomic_long_t check_context_retarget_intr;
32022 - atomic_long_t check_context_unload;
32023 - atomic_long_t tlb_dropin;
32024 - atomic_long_t tlb_preload_page;
32025 - atomic_long_t tlb_dropin_fail_no_asid;
32026 - atomic_long_t tlb_dropin_fail_upm;
32027 - atomic_long_t tlb_dropin_fail_invalid;
32028 - atomic_long_t tlb_dropin_fail_range_active;
32029 - atomic_long_t tlb_dropin_fail_idle;
32030 - atomic_long_t tlb_dropin_fail_fmm;
32031 - atomic_long_t tlb_dropin_fail_no_exception;
32032 - atomic_long_t tfh_stale_on_fault;
32033 - atomic_long_t mmu_invalidate_range;
32034 - atomic_long_t mmu_invalidate_page;
32035 - atomic_long_t flush_tlb;
32036 - atomic_long_t flush_tlb_gru;
32037 - atomic_long_t flush_tlb_gru_tgh;
32038 - atomic_long_t flush_tlb_gru_zero_asid;
32039 + atomic_long_unchecked_t vdata_alloc;
32040 + atomic_long_unchecked_t vdata_free;
32041 + atomic_long_unchecked_t gts_alloc;
32042 + atomic_long_unchecked_t gts_free;
32043 + atomic_long_unchecked_t gms_alloc;
32044 + atomic_long_unchecked_t gms_free;
32045 + atomic_long_unchecked_t gts_double_allocate;
32046 + atomic_long_unchecked_t assign_context;
32047 + atomic_long_unchecked_t assign_context_failed;
32048 + atomic_long_unchecked_t free_context;
32049 + atomic_long_unchecked_t load_user_context;
32050 + atomic_long_unchecked_t load_kernel_context;
32051 + atomic_long_unchecked_t lock_kernel_context;
32052 + atomic_long_unchecked_t unlock_kernel_context;
32053 + atomic_long_unchecked_t steal_user_context;
32054 + atomic_long_unchecked_t steal_kernel_context;
32055 + atomic_long_unchecked_t steal_context_failed;
32056 + atomic_long_unchecked_t nopfn;
32057 + atomic_long_unchecked_t asid_new;
32058 + atomic_long_unchecked_t asid_next;
32059 + atomic_long_unchecked_t asid_wrap;
32060 + atomic_long_unchecked_t asid_reuse;
32061 + atomic_long_unchecked_t intr;
32062 + atomic_long_unchecked_t intr_cbr;
32063 + atomic_long_unchecked_t intr_tfh;
32064 + atomic_long_unchecked_t intr_spurious;
32065 + atomic_long_unchecked_t intr_mm_lock_failed;
32066 + atomic_long_unchecked_t call_os;
32067 + atomic_long_unchecked_t call_os_wait_queue;
32068 + atomic_long_unchecked_t user_flush_tlb;
32069 + atomic_long_unchecked_t user_unload_context;
32070 + atomic_long_unchecked_t user_exception;
32071 + atomic_long_unchecked_t set_context_option;
32072 + atomic_long_unchecked_t check_context_retarget_intr;
32073 + atomic_long_unchecked_t check_context_unload;
32074 + atomic_long_unchecked_t tlb_dropin;
32075 + atomic_long_unchecked_t tlb_preload_page;
32076 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32077 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32078 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32079 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32080 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32081 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32082 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32083 + atomic_long_unchecked_t tfh_stale_on_fault;
32084 + atomic_long_unchecked_t mmu_invalidate_range;
32085 + atomic_long_unchecked_t mmu_invalidate_page;
32086 + atomic_long_unchecked_t flush_tlb;
32087 + atomic_long_unchecked_t flush_tlb_gru;
32088 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32089 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32090
32091 - atomic_long_t copy_gpa;
32092 - atomic_long_t read_gpa;
32093 + atomic_long_unchecked_t copy_gpa;
32094 + atomic_long_unchecked_t read_gpa;
32095
32096 - atomic_long_t mesq_receive;
32097 - atomic_long_t mesq_receive_none;
32098 - atomic_long_t mesq_send;
32099 - atomic_long_t mesq_send_failed;
32100 - atomic_long_t mesq_noop;
32101 - atomic_long_t mesq_send_unexpected_error;
32102 - atomic_long_t mesq_send_lb_overflow;
32103 - atomic_long_t mesq_send_qlimit_reached;
32104 - atomic_long_t mesq_send_amo_nacked;
32105 - atomic_long_t mesq_send_put_nacked;
32106 - atomic_long_t mesq_page_overflow;
32107 - atomic_long_t mesq_qf_locked;
32108 - atomic_long_t mesq_qf_noop_not_full;
32109 - atomic_long_t mesq_qf_switch_head_failed;
32110 - atomic_long_t mesq_qf_unexpected_error;
32111 - atomic_long_t mesq_noop_unexpected_error;
32112 - atomic_long_t mesq_noop_lb_overflow;
32113 - atomic_long_t mesq_noop_qlimit_reached;
32114 - atomic_long_t mesq_noop_amo_nacked;
32115 - atomic_long_t mesq_noop_put_nacked;
32116 - atomic_long_t mesq_noop_page_overflow;
32117 + atomic_long_unchecked_t mesq_receive;
32118 + atomic_long_unchecked_t mesq_receive_none;
32119 + atomic_long_unchecked_t mesq_send;
32120 + atomic_long_unchecked_t mesq_send_failed;
32121 + atomic_long_unchecked_t mesq_noop;
32122 + atomic_long_unchecked_t mesq_send_unexpected_error;
32123 + atomic_long_unchecked_t mesq_send_lb_overflow;
32124 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32125 + atomic_long_unchecked_t mesq_send_amo_nacked;
32126 + atomic_long_unchecked_t mesq_send_put_nacked;
32127 + atomic_long_unchecked_t mesq_page_overflow;
32128 + atomic_long_unchecked_t mesq_qf_locked;
32129 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32130 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32131 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32132 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32133 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32134 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32135 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32136 + atomic_long_unchecked_t mesq_noop_put_nacked;
32137 + atomic_long_unchecked_t mesq_noop_page_overflow;
32138
32139 };
32140
32141 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32142 tghop_invalidate, mcsop_last};
32143
32144 struct mcs_op_statistic {
32145 - atomic_long_t count;
32146 - atomic_long_t total;
32147 + atomic_long_unchecked_t count;
32148 + atomic_long_unchecked_t total;
32149 unsigned long max;
32150 };
32151
32152 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32153
32154 #define STAT(id) do { \
32155 if (gru_options & OPT_STATS) \
32156 - atomic_long_inc(&gru_stats.id); \
32157 + atomic_long_inc_unchecked(&gru_stats.id); \
32158 } while (0)
32159
32160 #ifdef CONFIG_SGI_GRU_DEBUG
32161 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32162 index 851b2f2..a4ec097 100644
32163 --- a/drivers/misc/sgi-xp/xp.h
32164 +++ b/drivers/misc/sgi-xp/xp.h
32165 @@ -289,7 +289,7 @@ struct xpc_interface {
32166 xpc_notify_func, void *);
32167 void (*received) (short, int, void *);
32168 enum xp_retval (*partid_to_nasids) (short, void *);
32169 -};
32170 +} __no_const;
32171
32172 extern struct xpc_interface xpc_interface;
32173
32174 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32175 index b94d5f7..7f494c5 100644
32176 --- a/drivers/misc/sgi-xp/xpc.h
32177 +++ b/drivers/misc/sgi-xp/xpc.h
32178 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32179 void (*received_payload) (struct xpc_channel *, void *);
32180 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32181 };
32182 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32183
32184 /* struct xpc_partition act_state values (for XPC HB) */
32185
32186 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32187 /* found in xpc_main.c */
32188 extern struct device *xpc_part;
32189 extern struct device *xpc_chan;
32190 -extern struct xpc_arch_operations xpc_arch_ops;
32191 +extern xpc_arch_operations_no_const xpc_arch_ops;
32192 extern int xpc_disengage_timelimit;
32193 extern int xpc_disengage_timedout;
32194 extern int xpc_activate_IRQ_rcvd;
32195 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32196 index 8d082b4..aa749ae 100644
32197 --- a/drivers/misc/sgi-xp/xpc_main.c
32198 +++ b/drivers/misc/sgi-xp/xpc_main.c
32199 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32200 .notifier_call = xpc_system_die,
32201 };
32202
32203 -struct xpc_arch_operations xpc_arch_ops;
32204 +xpc_arch_operations_no_const xpc_arch_ops;
32205
32206 /*
32207 * Timer function to enforce the timelimit on the partition disengage.
32208 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32209 index 6878a94..fe5c5f1 100644
32210 --- a/drivers/mmc/host/sdhci-pci.c
32211 +++ b/drivers/mmc/host/sdhci-pci.c
32212 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32213 .probe = via_probe,
32214 };
32215
32216 -static const struct pci_device_id pci_ids[] __devinitdata = {
32217 +static const struct pci_device_id pci_ids[] __devinitconst = {
32218 {
32219 .vendor = PCI_VENDOR_ID_RICOH,
32220 .device = PCI_DEVICE_ID_RICOH_R5C822,
32221 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32222 index e9fad91..0a7a16a 100644
32223 --- a/drivers/mtd/devices/doc2000.c
32224 +++ b/drivers/mtd/devices/doc2000.c
32225 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32226
32227 /* The ECC will not be calculated correctly if less than 512 is written */
32228 /* DBB-
32229 - if (len != 0x200 && eccbuf)
32230 + if (len != 0x200)
32231 printk(KERN_WARNING
32232 "ECC needs a full sector write (adr: %lx size %lx)\n",
32233 (long) to, (long) len);
32234 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32235 index a3f7a27..234016e 100644
32236 --- a/drivers/mtd/devices/doc2001.c
32237 +++ b/drivers/mtd/devices/doc2001.c
32238 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32239 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32240
32241 /* Don't allow read past end of device */
32242 - if (from >= this->totlen)
32243 + if (from >= this->totlen || !len)
32244 return -EINVAL;
32245
32246 /* Don't allow a single read to cross a 512-byte block boundary */
32247 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32248 index 3984d48..28aa897 100644
32249 --- a/drivers/mtd/nand/denali.c
32250 +++ b/drivers/mtd/nand/denali.c
32251 @@ -26,6 +26,7 @@
32252 #include <linux/pci.h>
32253 #include <linux/mtd/mtd.h>
32254 #include <linux/module.h>
32255 +#include <linux/slab.h>
32256
32257 #include "denali.h"
32258
32259 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32260 index ac40925..483b753 100644
32261 --- a/drivers/mtd/nftlmount.c
32262 +++ b/drivers/mtd/nftlmount.c
32263 @@ -24,6 +24,7 @@
32264 #include <asm/errno.h>
32265 #include <linux/delay.h>
32266 #include <linux/slab.h>
32267 +#include <linux/sched.h>
32268 #include <linux/mtd/mtd.h>
32269 #include <linux/mtd/nand.h>
32270 #include <linux/mtd/nftl.h>
32271 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32272 index 6c3fb5a..c542a81 100644
32273 --- a/drivers/mtd/ubi/build.c
32274 +++ b/drivers/mtd/ubi/build.c
32275 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32276 static int __init bytes_str_to_int(const char *str)
32277 {
32278 char *endp;
32279 - unsigned long result;
32280 + unsigned long result, scale = 1;
32281
32282 result = simple_strtoul(str, &endp, 0);
32283 if (str == endp || result >= INT_MAX) {
32284 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32285
32286 switch (*endp) {
32287 case 'G':
32288 - result *= 1024;
32289 + scale *= 1024;
32290 case 'M':
32291 - result *= 1024;
32292 + scale *= 1024;
32293 case 'K':
32294 - result *= 1024;
32295 + scale *= 1024;
32296 if (endp[1] == 'i' && endp[2] == 'B')
32297 endp += 2;
32298 case '\0':
32299 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32300 return -EINVAL;
32301 }
32302
32303 - return result;
32304 + if ((intoverflow_t)result*scale >= INT_MAX) {
32305 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32306 + str);
32307 + return -EINVAL;
32308 + }
32309 +
32310 + return result*scale;
32311 }
32312
32313 /**
32314 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32315 index 1feae59..c2a61d2 100644
32316 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32317 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32318 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32319 */
32320
32321 #define ATL2_PARAM(X, desc) \
32322 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32323 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32324 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32325 MODULE_PARM_DESC(X, desc);
32326 #else
32327 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32328 index 9a517c2..a50cfcb 100644
32329 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32330 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32331 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32332
32333 int (*wait_comp)(struct bnx2x *bp,
32334 struct bnx2x_rx_mode_ramrod_params *p);
32335 -};
32336 +} __no_const;
32337
32338 /********************** Set multicast group ***********************************/
32339
32340 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32341 index 94b4bd0..73c02de 100644
32342 --- a/drivers/net/ethernet/broadcom/tg3.h
32343 +++ b/drivers/net/ethernet/broadcom/tg3.h
32344 @@ -134,6 +134,7 @@
32345 #define CHIPREV_ID_5750_A0 0x4000
32346 #define CHIPREV_ID_5750_A1 0x4001
32347 #define CHIPREV_ID_5750_A3 0x4003
32348 +#define CHIPREV_ID_5750_C1 0x4201
32349 #define CHIPREV_ID_5750_C2 0x4202
32350 #define CHIPREV_ID_5752_A0_HW 0x5000
32351 #define CHIPREV_ID_5752_A0 0x6000
32352 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32353 index c5f5479..2e8c260 100644
32354 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32355 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32356 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32357 */
32358 struct l2t_skb_cb {
32359 arp_failure_handler_func arp_failure_handler;
32360 -};
32361 +} __no_const;
32362
32363 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32364
32365 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32366 index 871bcaa..4043505 100644
32367 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32368 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32369 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32370 for (i=0; i<ETH_ALEN; i++) {
32371 tmp.addr[i] = dev->dev_addr[i];
32372 }
32373 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32374 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32375 break;
32376
32377 case DE4X5_SET_HWADDR: /* Set the hardware address */
32378 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32379 spin_lock_irqsave(&lp->lock, flags);
32380 memcpy(&statbuf, &lp->pktStats, ioc->len);
32381 spin_unlock_irqrestore(&lp->lock, flags);
32382 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32383 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32384 return -EFAULT;
32385 break;
32386 }
32387 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32388 index 14d5b61..1398636 100644
32389 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32390 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32391 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32392 {NULL}};
32393
32394
32395 -static const char *block_name[] __devinitdata = {
32396 +static const char *block_name[] __devinitconst = {
32397 "21140 non-MII",
32398 "21140 MII PHY",
32399 "21142 Serial PHY",
32400 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32401 index 4d01219..b58d26d 100644
32402 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32403 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32404 @@ -236,7 +236,7 @@ struct pci_id_info {
32405 int drv_flags; /* Driver use, intended as capability flags. */
32406 };
32407
32408 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32409 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32410 { /* Sometime a Level-One switch card. */
32411 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32412 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32413 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32414 index dcd7f7a..ecb7fb3 100644
32415 --- a/drivers/net/ethernet/dlink/sundance.c
32416 +++ b/drivers/net/ethernet/dlink/sundance.c
32417 @@ -218,7 +218,7 @@ enum {
32418 struct pci_id_info {
32419 const char *name;
32420 };
32421 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32422 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32423 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32424 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32425 {"D-Link DFE-580TX 4 port Server Adapter"},
32426 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32427 index bf266a0..e024af7 100644
32428 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32429 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32430 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32431
32432 if (wrapped)
32433 newacc += 65536;
32434 - ACCESS_ONCE(*acc) = newacc;
32435 + ACCESS_ONCE_RW(*acc) = newacc;
32436 }
32437
32438 void be_parse_stats(struct be_adapter *adapter)
32439 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32440 index 61d2bdd..7f1154a 100644
32441 --- a/drivers/net/ethernet/fealnx.c
32442 +++ b/drivers/net/ethernet/fealnx.c
32443 @@ -150,7 +150,7 @@ struct chip_info {
32444 int flags;
32445 };
32446
32447 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32448 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32449 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32450 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32451 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32452 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32453 index e1159e5..e18684d 100644
32454 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32455 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32456 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32457 {
32458 struct e1000_hw *hw = &adapter->hw;
32459 struct e1000_mac_info *mac = &hw->mac;
32460 - struct e1000_mac_operations *func = &mac->ops;
32461 + e1000_mac_operations_no_const *func = &mac->ops;
32462
32463 /* Set media type */
32464 switch (adapter->pdev->device) {
32465 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32466 index a3e65fd..f451444 100644
32467 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32468 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32469 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32470 {
32471 struct e1000_hw *hw = &adapter->hw;
32472 struct e1000_mac_info *mac = &hw->mac;
32473 - struct e1000_mac_operations *func = &mac->ops;
32474 + e1000_mac_operations_no_const *func = &mac->ops;
32475 u32 swsm = 0;
32476 u32 swsm2 = 0;
32477 bool force_clear_smbi = false;
32478 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32479 index 2967039..ca8c40c 100644
32480 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32481 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32482 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32483 void (*write_vfta)(struct e1000_hw *, u32, u32);
32484 s32 (*read_mac_addr)(struct e1000_hw *);
32485 };
32486 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32487
32488 /*
32489 * When to use various PHY register access functions:
32490 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32491 void (*power_up)(struct e1000_hw *);
32492 void (*power_down)(struct e1000_hw *);
32493 };
32494 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32495
32496 /* Function pointers for the NVM. */
32497 struct e1000_nvm_operations {
32498 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32499 s32 (*validate)(struct e1000_hw *);
32500 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32501 };
32502 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32503
32504 struct e1000_mac_info {
32505 - struct e1000_mac_operations ops;
32506 + e1000_mac_operations_no_const ops;
32507 u8 addr[ETH_ALEN];
32508 u8 perm_addr[ETH_ALEN];
32509
32510 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32511 };
32512
32513 struct e1000_phy_info {
32514 - struct e1000_phy_operations ops;
32515 + e1000_phy_operations_no_const ops;
32516
32517 enum e1000_phy_type type;
32518
32519 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32520 };
32521
32522 struct e1000_nvm_info {
32523 - struct e1000_nvm_operations ops;
32524 + e1000_nvm_operations_no_const ops;
32525
32526 enum e1000_nvm_type type;
32527 enum e1000_nvm_override override;
32528 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32529 index 4519a13..f97fcd0 100644
32530 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32531 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32532 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32533 s32 (*read_mac_addr)(struct e1000_hw *);
32534 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32535 };
32536 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32537
32538 struct e1000_phy_operations {
32539 s32 (*acquire)(struct e1000_hw *);
32540 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32541 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32542 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32543 };
32544 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32545
32546 struct e1000_nvm_operations {
32547 s32 (*acquire)(struct e1000_hw *);
32548 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32549 s32 (*update)(struct e1000_hw *);
32550 s32 (*validate)(struct e1000_hw *);
32551 };
32552 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32553
32554 struct e1000_info {
32555 s32 (*get_invariants)(struct e1000_hw *);
32556 @@ -350,7 +353,7 @@ struct e1000_info {
32557 extern const struct e1000_info e1000_82575_info;
32558
32559 struct e1000_mac_info {
32560 - struct e1000_mac_operations ops;
32561 + e1000_mac_operations_no_const ops;
32562
32563 u8 addr[6];
32564 u8 perm_addr[6];
32565 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32566 };
32567
32568 struct e1000_phy_info {
32569 - struct e1000_phy_operations ops;
32570 + e1000_phy_operations_no_const ops;
32571
32572 enum e1000_phy_type type;
32573
32574 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32575 };
32576
32577 struct e1000_nvm_info {
32578 - struct e1000_nvm_operations ops;
32579 + e1000_nvm_operations_no_const ops;
32580 enum e1000_nvm_type type;
32581 enum e1000_nvm_override override;
32582
32583 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32584 s32 (*check_for_ack)(struct e1000_hw *, u16);
32585 s32 (*check_for_rst)(struct e1000_hw *, u16);
32586 };
32587 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32588
32589 struct e1000_mbx_stats {
32590 u32 msgs_tx;
32591 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32592 };
32593
32594 struct e1000_mbx_info {
32595 - struct e1000_mbx_operations ops;
32596 + e1000_mbx_operations_no_const ops;
32597 struct e1000_mbx_stats stats;
32598 u32 timeout;
32599 u32 usec_delay;
32600 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32601 index d7ed58f..64cde36 100644
32602 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32603 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32604 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32605 s32 (*read_mac_addr)(struct e1000_hw *);
32606 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32607 };
32608 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32609
32610 struct e1000_mac_info {
32611 - struct e1000_mac_operations ops;
32612 + e1000_mac_operations_no_const ops;
32613 u8 addr[6];
32614 u8 perm_addr[6];
32615
32616 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32617 s32 (*check_for_ack)(struct e1000_hw *);
32618 s32 (*check_for_rst)(struct e1000_hw *);
32619 };
32620 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32621
32622 struct e1000_mbx_stats {
32623 u32 msgs_tx;
32624 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32625 };
32626
32627 struct e1000_mbx_info {
32628 - struct e1000_mbx_operations ops;
32629 + e1000_mbx_operations_no_const ops;
32630 struct e1000_mbx_stats stats;
32631 u32 timeout;
32632 u32 usec_delay;
32633 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32634 index 6c5cca8..de8ef63 100644
32635 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32636 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32637 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32638 s32 (*update_checksum)(struct ixgbe_hw *);
32639 u16 (*calc_checksum)(struct ixgbe_hw *);
32640 };
32641 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32642
32643 struct ixgbe_mac_operations {
32644 s32 (*init_hw)(struct ixgbe_hw *);
32645 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32646 /* Manageability interface */
32647 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32648 };
32649 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32650
32651 struct ixgbe_phy_operations {
32652 s32 (*identify)(struct ixgbe_hw *);
32653 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32654 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32655 s32 (*check_overtemp)(struct ixgbe_hw *);
32656 };
32657 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32658
32659 struct ixgbe_eeprom_info {
32660 - struct ixgbe_eeprom_operations ops;
32661 + ixgbe_eeprom_operations_no_const ops;
32662 enum ixgbe_eeprom_type type;
32663 u32 semaphore_delay;
32664 u16 word_size;
32665 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32666
32667 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32668 struct ixgbe_mac_info {
32669 - struct ixgbe_mac_operations ops;
32670 + ixgbe_mac_operations_no_const ops;
32671 enum ixgbe_mac_type type;
32672 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32673 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32674 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32675 };
32676
32677 struct ixgbe_phy_info {
32678 - struct ixgbe_phy_operations ops;
32679 + ixgbe_phy_operations_no_const ops;
32680 struct mdio_if_info mdio;
32681 enum ixgbe_phy_type type;
32682 u32 id;
32683 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32684 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32685 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32686 };
32687 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32688
32689 struct ixgbe_mbx_stats {
32690 u32 msgs_tx;
32691 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32692 };
32693
32694 struct ixgbe_mbx_info {
32695 - struct ixgbe_mbx_operations ops;
32696 + ixgbe_mbx_operations_no_const ops;
32697 struct ixgbe_mbx_stats stats;
32698 u32 timeout;
32699 u32 usec_delay;
32700 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32701 index 10306b4..28df758 100644
32702 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32703 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32704 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32705 s32 (*clear_vfta)(struct ixgbe_hw *);
32706 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32707 };
32708 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32709
32710 enum ixgbe_mac_type {
32711 ixgbe_mac_unknown = 0,
32712 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32713 };
32714
32715 struct ixgbe_mac_info {
32716 - struct ixgbe_mac_operations ops;
32717 + ixgbe_mac_operations_no_const ops;
32718 u8 addr[6];
32719 u8 perm_addr[6];
32720
32721 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32722 s32 (*check_for_ack)(struct ixgbe_hw *);
32723 s32 (*check_for_rst)(struct ixgbe_hw *);
32724 };
32725 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32726
32727 struct ixgbe_mbx_stats {
32728 u32 msgs_tx;
32729 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32730 };
32731
32732 struct ixgbe_mbx_info {
32733 - struct ixgbe_mbx_operations ops;
32734 + ixgbe_mbx_operations_no_const ops;
32735 struct ixgbe_mbx_stats stats;
32736 u32 timeout;
32737 u32 udelay;
32738 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32739 index 94bbc85..78c12e6 100644
32740 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
32741 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32742 @@ -40,6 +40,7 @@
32743 #include <linux/dma-mapping.h>
32744 #include <linux/slab.h>
32745 #include <linux/io-mapping.h>
32746 +#include <linux/sched.h>
32747
32748 #include <linux/mlx4/device.h>
32749 #include <linux/mlx4/doorbell.h>
32750 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32751 index 5046a64..71ca936 100644
32752 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32753 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32754 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32755 void (*link_down)(struct __vxge_hw_device *devh);
32756 void (*crit_err)(struct __vxge_hw_device *devh,
32757 enum vxge_hw_event type, u64 ext_data);
32758 -};
32759 +} __no_const;
32760
32761 /*
32762 * struct __vxge_hw_blockpool_entry - Block private data structure
32763 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32764 index 4a518a3..936b334 100644
32765 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32766 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32767 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32768 struct vxge_hw_mempool_dma *dma_object,
32769 u32 index,
32770 u32 is_last);
32771 -};
32772 +} __no_const;
32773
32774 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32775 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32776 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32777 index c8f47f1..5da9840 100644
32778 --- a/drivers/net/ethernet/realtek/r8169.c
32779 +++ b/drivers/net/ethernet/realtek/r8169.c
32780 @@ -698,17 +698,17 @@ struct rtl8169_private {
32781 struct mdio_ops {
32782 void (*write)(void __iomem *, int, int);
32783 int (*read)(void __iomem *, int);
32784 - } mdio_ops;
32785 + } __no_const mdio_ops;
32786
32787 struct pll_power_ops {
32788 void (*down)(struct rtl8169_private *);
32789 void (*up)(struct rtl8169_private *);
32790 - } pll_power_ops;
32791 + } __no_const pll_power_ops;
32792
32793 struct jumbo_ops {
32794 void (*enable)(struct rtl8169_private *);
32795 void (*disable)(struct rtl8169_private *);
32796 - } jumbo_ops;
32797 + } __no_const jumbo_ops;
32798
32799 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32800 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32801 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32802 index 1b4658c..a30dabb 100644
32803 --- a/drivers/net/ethernet/sis/sis190.c
32804 +++ b/drivers/net/ethernet/sis/sis190.c
32805 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32806 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32807 struct net_device *dev)
32808 {
32809 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32810 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32811 struct sis190_private *tp = netdev_priv(dev);
32812 struct pci_dev *isa_bridge;
32813 u8 reg, tmp8;
32814 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32815 index edfa15d..002bfa9 100644
32816 --- a/drivers/net/ppp/ppp_generic.c
32817 +++ b/drivers/net/ppp/ppp_generic.c
32818 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32819 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32820 struct ppp_stats stats;
32821 struct ppp_comp_stats cstats;
32822 - char *vers;
32823
32824 switch (cmd) {
32825 case SIOCGPPPSTATS:
32826 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32827 break;
32828
32829 case SIOCGPPPVER:
32830 - vers = PPP_VERSION;
32831 - if (copy_to_user(addr, vers, strlen(vers) + 1))
32832 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32833 break;
32834 err = 0;
32835 break;
32836 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32837 index 515f122..41dd273 100644
32838 --- a/drivers/net/tokenring/abyss.c
32839 +++ b/drivers/net/tokenring/abyss.c
32840 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32841
32842 static int __init abyss_init (void)
32843 {
32844 - abyss_netdev_ops = tms380tr_netdev_ops;
32845 + pax_open_kernel();
32846 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32847
32848 - abyss_netdev_ops.ndo_open = abyss_open;
32849 - abyss_netdev_ops.ndo_stop = abyss_close;
32850 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32851 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32852 + pax_close_kernel();
32853
32854 return pci_register_driver(&abyss_driver);
32855 }
32856 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32857 index 6153cfd..cf69c1c 100644
32858 --- a/drivers/net/tokenring/madgemc.c
32859 +++ b/drivers/net/tokenring/madgemc.c
32860 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32861
32862 static int __init madgemc_init (void)
32863 {
32864 - madgemc_netdev_ops = tms380tr_netdev_ops;
32865 - madgemc_netdev_ops.ndo_open = madgemc_open;
32866 - madgemc_netdev_ops.ndo_stop = madgemc_close;
32867 + pax_open_kernel();
32868 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32869 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32870 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32871 + pax_close_kernel();
32872
32873 return mca_register_driver (&madgemc_driver);
32874 }
32875 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32876 index 8d362e6..f91cc52 100644
32877 --- a/drivers/net/tokenring/proteon.c
32878 +++ b/drivers/net/tokenring/proteon.c
32879 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
32880 struct platform_device *pdev;
32881 int i, num = 0, err = 0;
32882
32883 - proteon_netdev_ops = tms380tr_netdev_ops;
32884 - proteon_netdev_ops.ndo_open = proteon_open;
32885 - proteon_netdev_ops.ndo_stop = tms380tr_close;
32886 + pax_open_kernel();
32887 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32888 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32889 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32890 + pax_close_kernel();
32891
32892 err = platform_driver_register(&proteon_driver);
32893 if (err)
32894 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32895 index 46db5c5..37c1536 100644
32896 --- a/drivers/net/tokenring/skisa.c
32897 +++ b/drivers/net/tokenring/skisa.c
32898 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32899 struct platform_device *pdev;
32900 int i, num = 0, err = 0;
32901
32902 - sk_isa_netdev_ops = tms380tr_netdev_ops;
32903 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
32904 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32905 + pax_open_kernel();
32906 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32907 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32908 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32909 + pax_close_kernel();
32910
32911 err = platform_driver_register(&sk_isa_driver);
32912 if (err)
32913 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
32914 index 304fe78..db112fa 100644
32915 --- a/drivers/net/usb/hso.c
32916 +++ b/drivers/net/usb/hso.c
32917 @@ -71,7 +71,7 @@
32918 #include <asm/byteorder.h>
32919 #include <linux/serial_core.h>
32920 #include <linux/serial.h>
32921 -
32922 +#include <asm/local.h>
32923
32924 #define MOD_AUTHOR "Option Wireless"
32925 #define MOD_DESCRIPTION "USB High Speed Option driver"
32926 @@ -257,7 +257,7 @@ struct hso_serial {
32927
32928 /* from usb_serial_port */
32929 struct tty_struct *tty;
32930 - int open_count;
32931 + local_t open_count;
32932 spinlock_t serial_lock;
32933
32934 int (*write_data) (struct hso_serial *serial);
32935 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
32936 struct urb *urb;
32937
32938 urb = serial->rx_urb[0];
32939 - if (serial->open_count > 0) {
32940 + if (local_read(&serial->open_count) > 0) {
32941 count = put_rxbuf_data(urb, serial);
32942 if (count == -1)
32943 return;
32944 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
32945 DUMP1(urb->transfer_buffer, urb->actual_length);
32946
32947 /* Anyone listening? */
32948 - if (serial->open_count == 0)
32949 + if (local_read(&serial->open_count) == 0)
32950 return;
32951
32952 if (status == 0) {
32953 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
32954 spin_unlock_irq(&serial->serial_lock);
32955
32956 /* check for port already opened, if not set the termios */
32957 - serial->open_count++;
32958 - if (serial->open_count == 1) {
32959 + if (local_inc_return(&serial->open_count) == 1) {
32960 serial->rx_state = RX_IDLE;
32961 /* Force default termio settings */
32962 _hso_serial_set_termios(tty, NULL);
32963 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
32964 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32965 if (result) {
32966 hso_stop_serial_device(serial->parent);
32967 - serial->open_count--;
32968 + local_dec(&serial->open_count);
32969 kref_put(&serial->parent->ref, hso_serial_ref_free);
32970 }
32971 } else {
32972 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
32973
32974 /* reset the rts and dtr */
32975 /* do the actual close */
32976 - serial->open_count--;
32977 + local_dec(&serial->open_count);
32978
32979 - if (serial->open_count <= 0) {
32980 - serial->open_count = 0;
32981 + if (local_read(&serial->open_count) <= 0) {
32982 + local_set(&serial->open_count, 0);
32983 spin_lock_irq(&serial->serial_lock);
32984 if (serial->tty == tty) {
32985 serial->tty->driver_data = NULL;
32986 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
32987
32988 /* the actual setup */
32989 spin_lock_irqsave(&serial->serial_lock, flags);
32990 - if (serial->open_count)
32991 + if (local_read(&serial->open_count))
32992 _hso_serial_set_termios(tty, old);
32993 else
32994 tty->termios = old;
32995 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
32996 D1("Pending read interrupt on port %d\n", i);
32997 spin_lock(&serial->serial_lock);
32998 if (serial->rx_state == RX_IDLE &&
32999 - serial->open_count > 0) {
33000 + local_read(&serial->open_count) > 0) {
33001 /* Setup and send a ctrl req read on
33002 * port i */
33003 if (!serial->rx_urb_filled[0]) {
33004 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33005 /* Start all serial ports */
33006 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33007 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33008 - if (dev2ser(serial_table[i])->open_count) {
33009 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33010 result =
33011 hso_start_serial_device(serial_table[i], GFP_NOIO);
33012 hso_kick_transmit(dev2ser(serial_table[i]));
33013 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33014 index e662cbc..8d4a102 100644
33015 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33016 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33017 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33018 * Return with error code if any of the queue indices
33019 * is out of range
33020 */
33021 - if (p->ring_index[i] < 0 ||
33022 - p->ring_index[i] >= adapter->num_rx_queues)
33023 + if (p->ring_index[i] >= adapter->num_rx_queues)
33024 return -EINVAL;
33025 }
33026
33027 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33028 index 0f9ee46..e2d6e65 100644
33029 --- a/drivers/net/wireless/ath/ath.h
33030 +++ b/drivers/net/wireless/ath/ath.h
33031 @@ -119,6 +119,7 @@ struct ath_ops {
33032 void (*write_flush) (void *);
33033 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33034 };
33035 +typedef struct ath_ops __no_const ath_ops_no_const;
33036
33037 struct ath_common;
33038 struct ath_bus_ops;
33039 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33040 index b592016..fe47870 100644
33041 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33042 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33043 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33044 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33045 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33046
33047 - ACCESS_ONCE(ads->ds_link) = i->link;
33048 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33049 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33050 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33051
33052 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33053 ctl6 = SM(i->keytype, AR_EncrType);
33054 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33055
33056 if ((i->is_first || i->is_last) &&
33057 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33058 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33059 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33060 | set11nTries(i->rates, 1)
33061 | set11nTries(i->rates, 2)
33062 | set11nTries(i->rates, 3)
33063 | (i->dur_update ? AR_DurUpdateEna : 0)
33064 | SM(0, AR_BurstDur);
33065
33066 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33067 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33068 | set11nRate(i->rates, 1)
33069 | set11nRate(i->rates, 2)
33070 | set11nRate(i->rates, 3);
33071 } else {
33072 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33073 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33074 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33075 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33076 }
33077
33078 if (!i->is_first) {
33079 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33080 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33081 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33082 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33083 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33084 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33085 return;
33086 }
33087
33088 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33089 break;
33090 }
33091
33092 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33093 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33094 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33095 | SM(i->txpower, AR_XmitPower)
33096 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33097 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33098 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33099 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33100
33101 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33102 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33103 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33104 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33105
33106 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33107 return;
33108
33109 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33110 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33111 | set11nPktDurRTSCTS(i->rates, 1);
33112
33113 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33114 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33115 | set11nPktDurRTSCTS(i->rates, 3);
33116
33117 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33118 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33119 | set11nRateFlags(i->rates, 1)
33120 | set11nRateFlags(i->rates, 2)
33121 | set11nRateFlags(i->rates, 3)
33122 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33123 index f5ae3c6..7936af3 100644
33124 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33125 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33126 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33127 (i->qcu << AR_TxQcuNum_S) | 0x17;
33128
33129 checksum += val;
33130 - ACCESS_ONCE(ads->info) = val;
33131 + ACCESS_ONCE_RW(ads->info) = val;
33132
33133 checksum += i->link;
33134 - ACCESS_ONCE(ads->link) = i->link;
33135 + ACCESS_ONCE_RW(ads->link) = i->link;
33136
33137 checksum += i->buf_addr[0];
33138 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33139 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33140 checksum += i->buf_addr[1];
33141 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33142 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33143 checksum += i->buf_addr[2];
33144 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33145 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33146 checksum += i->buf_addr[3];
33147 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33148 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33149
33150 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33151 - ACCESS_ONCE(ads->ctl3) = val;
33152 + ACCESS_ONCE_RW(ads->ctl3) = val;
33153 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33154 - ACCESS_ONCE(ads->ctl5) = val;
33155 + ACCESS_ONCE_RW(ads->ctl5) = val;
33156 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33157 - ACCESS_ONCE(ads->ctl7) = val;
33158 + ACCESS_ONCE_RW(ads->ctl7) = val;
33159 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33160 - ACCESS_ONCE(ads->ctl9) = val;
33161 + ACCESS_ONCE_RW(ads->ctl9) = val;
33162
33163 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33164 - ACCESS_ONCE(ads->ctl10) = checksum;
33165 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33166
33167 if (i->is_first || i->is_last) {
33168 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33169 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33170 | set11nTries(i->rates, 1)
33171 | set11nTries(i->rates, 2)
33172 | set11nTries(i->rates, 3)
33173 | (i->dur_update ? AR_DurUpdateEna : 0)
33174 | SM(0, AR_BurstDur);
33175
33176 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33177 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33178 | set11nRate(i->rates, 1)
33179 | set11nRate(i->rates, 2)
33180 | set11nRate(i->rates, 3);
33181 } else {
33182 - ACCESS_ONCE(ads->ctl13) = 0;
33183 - ACCESS_ONCE(ads->ctl14) = 0;
33184 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33185 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33186 }
33187
33188 ads->ctl20 = 0;
33189 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33190
33191 ctl17 = SM(i->keytype, AR_EncrType);
33192 if (!i->is_first) {
33193 - ACCESS_ONCE(ads->ctl11) = 0;
33194 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33195 - ACCESS_ONCE(ads->ctl15) = 0;
33196 - ACCESS_ONCE(ads->ctl16) = 0;
33197 - ACCESS_ONCE(ads->ctl17) = ctl17;
33198 - ACCESS_ONCE(ads->ctl18) = 0;
33199 - ACCESS_ONCE(ads->ctl19) = 0;
33200 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33201 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33202 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33203 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33204 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33205 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33206 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33207 return;
33208 }
33209
33210 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33211 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33212 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33213 | SM(i->txpower, AR_XmitPower)
33214 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33215 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33216 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33217 ctl12 |= SM(val, AR_PAPRDChainMask);
33218
33219 - ACCESS_ONCE(ads->ctl12) = ctl12;
33220 - ACCESS_ONCE(ads->ctl17) = ctl17;
33221 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33222 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33223
33224 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33225 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33226 | set11nPktDurRTSCTS(i->rates, 1);
33227
33228 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33229 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33230 | set11nPktDurRTSCTS(i->rates, 3);
33231
33232 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33233 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33234 | set11nRateFlags(i->rates, 1)
33235 | set11nRateFlags(i->rates, 2)
33236 | set11nRateFlags(i->rates, 3)
33237 | SM(i->rtscts_rate, AR_RTSCTSRate);
33238
33239 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33240 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33241 }
33242
33243 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33244 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33245 index f389b3c..7359e18 100644
33246 --- a/drivers/net/wireless/ath/ath9k/hw.h
33247 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33248 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33249
33250 /* ANI */
33251 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33252 -};
33253 +} __no_const;
33254
33255 /**
33256 * struct ath_hw_ops - callbacks used by hardware code and driver code
33257 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33258 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33259 struct ath_hw_antcomb_conf *antconf);
33260
33261 -};
33262 +} __no_const;
33263
33264 struct ath_nf_limits {
33265 s16 max;
33266 @@ -655,7 +655,7 @@ enum ath_cal_list {
33267 #define AH_FASTCC 0x4
33268
33269 struct ath_hw {
33270 - struct ath_ops reg_ops;
33271 + ath_ops_no_const reg_ops;
33272
33273 struct ieee80211_hw *hw;
33274 struct ath_common common;
33275 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33276 index bea8524..c677c06 100644
33277 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33278 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33279 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33280 void (*carrsuppr)(struct brcms_phy *);
33281 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33282 void (*detach)(struct brcms_phy *);
33283 -};
33284 +} __no_const;
33285
33286 struct brcms_phy {
33287 struct brcms_phy_pub pubpi_ro;
33288 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33289 index 05f2ad1..ae00eea 100644
33290 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33291 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33292 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33293 */
33294 if (iwl3945_mod_params.disable_hw_scan) {
33295 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33296 - iwl3945_hw_ops.hw_scan = NULL;
33297 + pax_open_kernel();
33298 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33299 + pax_close_kernel();
33300 }
33301
33302 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33303 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33304 index 69a77e2..552b42c 100644
33305 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33306 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33307 @@ -71,8 +71,8 @@ do { \
33308 } while (0)
33309
33310 #else
33311 -#define IWL_DEBUG(m, level, fmt, args...)
33312 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33313 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33314 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33315 #define iwl_print_hex_dump(m, level, p, len)
33316 #endif /* CONFIG_IWLWIFI_DEBUG */
33317
33318 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33319 index 523ad55..f8c5dc5 100644
33320 --- a/drivers/net/wireless/mac80211_hwsim.c
33321 +++ b/drivers/net/wireless/mac80211_hwsim.c
33322 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33323 return -EINVAL;
33324
33325 if (fake_hw_scan) {
33326 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33327 - mac80211_hwsim_ops.sw_scan_start = NULL;
33328 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33329 + pax_open_kernel();
33330 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33331 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33332 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33333 + pax_close_kernel();
33334 }
33335
33336 spin_lock_init(&hwsim_radio_lock);
33337 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33338 index 30f138b..c904585 100644
33339 --- a/drivers/net/wireless/mwifiex/main.h
33340 +++ b/drivers/net/wireless/mwifiex/main.h
33341 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33342 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33343 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33344 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33345 -};
33346 +} __no_const;
33347
33348 struct mwifiex_adapter {
33349 u8 iface_type;
33350 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33351 index 0c13840..a5c3ed6 100644
33352 --- a/drivers/net/wireless/rndis_wlan.c
33353 +++ b/drivers/net/wireless/rndis_wlan.c
33354 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33355
33356 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33357
33358 - if (rts_threshold < 0 || rts_threshold > 2347)
33359 + if (rts_threshold > 2347)
33360 rts_threshold = 2347;
33361
33362 tmp = cpu_to_le32(rts_threshold);
33363 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33364 index a77f1bb..c608b2b 100644
33365 --- a/drivers/net/wireless/wl1251/wl1251.h
33366 +++ b/drivers/net/wireless/wl1251/wl1251.h
33367 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33368 void (*reset)(struct wl1251 *wl);
33369 void (*enable_irq)(struct wl1251 *wl);
33370 void (*disable_irq)(struct wl1251 *wl);
33371 -};
33372 +} __no_const;
33373
33374 struct wl1251 {
33375 struct ieee80211_hw *hw;
33376 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33377 index f34b5b2..b5abb9f 100644
33378 --- a/drivers/oprofile/buffer_sync.c
33379 +++ b/drivers/oprofile/buffer_sync.c
33380 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33381 if (cookie == NO_COOKIE)
33382 offset = pc;
33383 if (cookie == INVALID_COOKIE) {
33384 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33385 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33386 offset = pc;
33387 }
33388 if (cookie != last_cookie) {
33389 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33390 /* add userspace sample */
33391
33392 if (!mm) {
33393 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33394 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33395 return 0;
33396 }
33397
33398 cookie = lookup_dcookie(mm, s->eip, &offset);
33399
33400 if (cookie == INVALID_COOKIE) {
33401 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33402 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33403 return 0;
33404 }
33405
33406 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33407 /* ignore backtraces if failed to add a sample */
33408 if (state == sb_bt_start) {
33409 state = sb_bt_ignore;
33410 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33411 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33412 }
33413 }
33414 release_mm(mm);
33415 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33416 index c0cc4e7..44d4e54 100644
33417 --- a/drivers/oprofile/event_buffer.c
33418 +++ b/drivers/oprofile/event_buffer.c
33419 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33420 }
33421
33422 if (buffer_pos == buffer_size) {
33423 - atomic_inc(&oprofile_stats.event_lost_overflow);
33424 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33425 return;
33426 }
33427
33428 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33429 index f8c752e..28bf4fc 100644
33430 --- a/drivers/oprofile/oprof.c
33431 +++ b/drivers/oprofile/oprof.c
33432 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33433 if (oprofile_ops.switch_events())
33434 return;
33435
33436 - atomic_inc(&oprofile_stats.multiplex_counter);
33437 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33438 start_switch_worker();
33439 }
33440
33441 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33442 index 917d28e..d62d981 100644
33443 --- a/drivers/oprofile/oprofile_stats.c
33444 +++ b/drivers/oprofile/oprofile_stats.c
33445 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33446 cpu_buf->sample_invalid_eip = 0;
33447 }
33448
33449 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33450 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33451 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33452 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33453 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33454 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33455 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33456 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33457 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33458 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33459 }
33460
33461
33462 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33463 index 38b6fc0..b5cbfce 100644
33464 --- a/drivers/oprofile/oprofile_stats.h
33465 +++ b/drivers/oprofile/oprofile_stats.h
33466 @@ -13,11 +13,11 @@
33467 #include <linux/atomic.h>
33468
33469 struct oprofile_stat_struct {
33470 - atomic_t sample_lost_no_mm;
33471 - atomic_t sample_lost_no_mapping;
33472 - atomic_t bt_lost_no_mapping;
33473 - atomic_t event_lost_overflow;
33474 - atomic_t multiplex_counter;
33475 + atomic_unchecked_t sample_lost_no_mm;
33476 + atomic_unchecked_t sample_lost_no_mapping;
33477 + atomic_unchecked_t bt_lost_no_mapping;
33478 + atomic_unchecked_t event_lost_overflow;
33479 + atomic_unchecked_t multiplex_counter;
33480 };
33481
33482 extern struct oprofile_stat_struct oprofile_stats;
33483 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33484 index 2f0aa0f..90fab02 100644
33485 --- a/drivers/oprofile/oprofilefs.c
33486 +++ b/drivers/oprofile/oprofilefs.c
33487 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33488
33489
33490 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33491 - char const *name, atomic_t *val)
33492 + char const *name, atomic_unchecked_t *val)
33493 {
33494 return __oprofilefs_create_file(sb, root, name,
33495 &atomic_ro_fops, 0444, val);
33496 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33497 index 3f56bc0..707d642 100644
33498 --- a/drivers/parport/procfs.c
33499 +++ b/drivers/parport/procfs.c
33500 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33501
33502 *ppos += len;
33503
33504 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33505 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33506 }
33507
33508 #ifdef CONFIG_PARPORT_1284
33509 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33510
33511 *ppos += len;
33512
33513 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33514 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33515 }
33516 #endif /* IEEE1284.3 support. */
33517
33518 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33519 index 9fff878..ad0ad53 100644
33520 --- a/drivers/pci/hotplug/cpci_hotplug.h
33521 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33522 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33523 int (*hardware_test) (struct slot* slot, u32 value);
33524 u8 (*get_power) (struct slot* slot);
33525 int (*set_power) (struct slot* slot, int value);
33526 -};
33527 +} __no_const;
33528
33529 struct cpci_hp_controller {
33530 unsigned int irq;
33531 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33532 index 76ba8a1..20ca857 100644
33533 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33534 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33535 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33536
33537 void compaq_nvram_init (void __iomem *rom_start)
33538 {
33539 +
33540 +#ifndef CONFIG_PAX_KERNEXEC
33541 if (rom_start) {
33542 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33543 }
33544 +#endif
33545 +
33546 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33547
33548 /* initialize our int15 lock */
33549 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33550 index cbfbab1..6a9fced 100644
33551 --- a/drivers/pci/pcie/aspm.c
33552 +++ b/drivers/pci/pcie/aspm.c
33553 @@ -27,9 +27,9 @@
33554 #define MODULE_PARAM_PREFIX "pcie_aspm."
33555
33556 /* Note: those are not register definitions */
33557 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33558 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33559 -#define ASPM_STATE_L1 (4) /* L1 state */
33560 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33561 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33562 +#define ASPM_STATE_L1 (4U) /* L1 state */
33563 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33564 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33565
33566 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33567 index 04e74f4..a960176 100644
33568 --- a/drivers/pci/probe.c
33569 +++ b/drivers/pci/probe.c
33570 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33571 u32 l, sz, mask;
33572 u16 orig_cmd;
33573
33574 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33575 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33576
33577 if (!dev->mmio_always_on) {
33578 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33579 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33580 index 27911b5..5b6db88 100644
33581 --- a/drivers/pci/proc.c
33582 +++ b/drivers/pci/proc.c
33583 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33584 static int __init pci_proc_init(void)
33585 {
33586 struct pci_dev *dev = NULL;
33587 +
33588 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33589 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33590 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33591 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33592 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33593 +#endif
33594 +#else
33595 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33596 +#endif
33597 proc_create("devices", 0, proc_bus_pci_dir,
33598 &proc_bus_pci_dev_operations);
33599 proc_initialized = 1;
33600 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33601 index 7b82868..b9344c9 100644
33602 --- a/drivers/platform/x86/thinkpad_acpi.c
33603 +++ b/drivers/platform/x86/thinkpad_acpi.c
33604 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33605 return 0;
33606 }
33607
33608 -void static hotkey_mask_warn_incomplete_mask(void)
33609 +static void hotkey_mask_warn_incomplete_mask(void)
33610 {
33611 /* log only what the user can fix... */
33612 const u32 wantedmask = hotkey_driver_mask &
33613 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33614 }
33615 }
33616
33617 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33618 - struct tp_nvram_state *newn,
33619 - const u32 event_mask)
33620 -{
33621 -
33622 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33623 do { \
33624 if ((event_mask & (1 << __scancode)) && \
33625 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33626 tpacpi_hotkey_send_key(__scancode); \
33627 } while (0)
33628
33629 - void issue_volchange(const unsigned int oldvol,
33630 - const unsigned int newvol)
33631 - {
33632 - unsigned int i = oldvol;
33633 +static void issue_volchange(const unsigned int oldvol,
33634 + const unsigned int newvol,
33635 + const u32 event_mask)
33636 +{
33637 + unsigned int i = oldvol;
33638
33639 - while (i > newvol) {
33640 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33641 - i--;
33642 - }
33643 - while (i < newvol) {
33644 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33645 - i++;
33646 - }
33647 + while (i > newvol) {
33648 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33649 + i--;
33650 }
33651 + while (i < newvol) {
33652 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33653 + i++;
33654 + }
33655 +}
33656
33657 - void issue_brightnesschange(const unsigned int oldbrt,
33658 - const unsigned int newbrt)
33659 - {
33660 - unsigned int i = oldbrt;
33661 +static void issue_brightnesschange(const unsigned int oldbrt,
33662 + const unsigned int newbrt,
33663 + const u32 event_mask)
33664 +{
33665 + unsigned int i = oldbrt;
33666
33667 - while (i > newbrt) {
33668 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33669 - i--;
33670 - }
33671 - while (i < newbrt) {
33672 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33673 - i++;
33674 - }
33675 + while (i > newbrt) {
33676 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33677 + i--;
33678 + }
33679 + while (i < newbrt) {
33680 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33681 + i++;
33682 }
33683 +}
33684
33685 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33686 + struct tp_nvram_state *newn,
33687 + const u32 event_mask)
33688 +{
33689 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33690 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33691 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33692 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33693 oldn->volume_level != newn->volume_level) {
33694 /* recently muted, or repeated mute keypress, or
33695 * multiple presses ending in mute */
33696 - issue_volchange(oldn->volume_level, newn->volume_level);
33697 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33698 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33699 }
33700 } else {
33701 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33702 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33703 }
33704 if (oldn->volume_level != newn->volume_level) {
33705 - issue_volchange(oldn->volume_level, newn->volume_level);
33706 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33707 } else if (oldn->volume_toggle != newn->volume_toggle) {
33708 /* repeated vol up/down keypress at end of scale ? */
33709 if (newn->volume_level == 0)
33710 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33711 /* handle brightness */
33712 if (oldn->brightness_level != newn->brightness_level) {
33713 issue_brightnesschange(oldn->brightness_level,
33714 - newn->brightness_level);
33715 + newn->brightness_level,
33716 + event_mask);
33717 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33718 /* repeated key presses that didn't change state */
33719 if (newn->brightness_level == 0)
33720 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33721 && !tp_features.bright_unkfw)
33722 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33723 }
33724 +}
33725
33726 #undef TPACPI_COMPARE_KEY
33727 #undef TPACPI_MAY_SEND_KEY
33728 -}
33729
33730 /*
33731 * Polling driver
33732 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33733 index b859d16..5cc6b1a 100644
33734 --- a/drivers/pnp/pnpbios/bioscalls.c
33735 +++ b/drivers/pnp/pnpbios/bioscalls.c
33736 @@ -59,7 +59,7 @@ do { \
33737 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33738 } while(0)
33739
33740 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33741 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33742 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33743
33744 /*
33745 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33746
33747 cpu = get_cpu();
33748 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33749 +
33750 + pax_open_kernel();
33751 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33752 + pax_close_kernel();
33753
33754 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33755 spin_lock_irqsave(&pnp_bios_lock, flags);
33756 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33757 :"memory");
33758 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33759
33760 + pax_open_kernel();
33761 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33762 + pax_close_kernel();
33763 +
33764 put_cpu();
33765
33766 /* If we get here and this is set then the PnP BIOS faulted on us. */
33767 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33768 return status;
33769 }
33770
33771 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33772 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33773 {
33774 int i;
33775
33776 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33777 pnp_bios_callpoint.offset = header->fields.pm16offset;
33778 pnp_bios_callpoint.segment = PNP_CS16;
33779
33780 + pax_open_kernel();
33781 +
33782 for_each_possible_cpu(i) {
33783 struct desc_struct *gdt = get_cpu_gdt_table(i);
33784 if (!gdt)
33785 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33786 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33787 (unsigned long)__va(header->fields.pm16dseg));
33788 }
33789 +
33790 + pax_close_kernel();
33791 }
33792 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33793 index b0ecacb..7c9da2e 100644
33794 --- a/drivers/pnp/resource.c
33795 +++ b/drivers/pnp/resource.c
33796 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33797 return 1;
33798
33799 /* check if the resource is valid */
33800 - if (*irq < 0 || *irq > 15)
33801 + if (*irq > 15)
33802 return 0;
33803
33804 /* check if the resource is reserved */
33805 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33806 return 1;
33807
33808 /* check if the resource is valid */
33809 - if (*dma < 0 || *dma == 4 || *dma > 7)
33810 + if (*dma == 4 || *dma > 7)
33811 return 0;
33812
33813 /* check if the resource is reserved */
33814 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33815 index bb16f5b..c751eef 100644
33816 --- a/drivers/power/bq27x00_battery.c
33817 +++ b/drivers/power/bq27x00_battery.c
33818 @@ -67,7 +67,7 @@
33819 struct bq27x00_device_info;
33820 struct bq27x00_access_methods {
33821 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33822 -};
33823 +} __no_const;
33824
33825 enum bq27x00_chip { BQ27000, BQ27500 };
33826
33827 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33828 index 33f5d9a..d957d3f 100644
33829 --- a/drivers/regulator/max8660.c
33830 +++ b/drivers/regulator/max8660.c
33831 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33832 max8660->shadow_regs[MAX8660_OVER1] = 5;
33833 } else {
33834 /* Otherwise devices can be toggled via software */
33835 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
33836 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
33837 + pax_open_kernel();
33838 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33839 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33840 + pax_close_kernel();
33841 }
33842
33843 /*
33844 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33845 index 023d17d..74ef35b 100644
33846 --- a/drivers/regulator/mc13892-regulator.c
33847 +++ b/drivers/regulator/mc13892-regulator.c
33848 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33849 }
33850 mc13xxx_unlock(mc13892);
33851
33852 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33853 + pax_open_kernel();
33854 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33855 = mc13892_vcam_set_mode;
33856 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33857 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33858 = mc13892_vcam_get_mode;
33859 + pax_close_kernel();
33860 for (i = 0; i < pdata->num_regulators; i++) {
33861 init_data = &pdata->regulators[i];
33862 priv->regulators[i] = regulator_register(
33863 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33864 index cace6d3..f623fda 100644
33865 --- a/drivers/rtc/rtc-dev.c
33866 +++ b/drivers/rtc/rtc-dev.c
33867 @@ -14,6 +14,7 @@
33868 #include <linux/module.h>
33869 #include <linux/rtc.h>
33870 #include <linux/sched.h>
33871 +#include <linux/grsecurity.h>
33872 #include "rtc-core.h"
33873
33874 static dev_t rtc_devt;
33875 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33876 if (copy_from_user(&tm, uarg, sizeof(tm)))
33877 return -EFAULT;
33878
33879 + gr_log_timechange();
33880 +
33881 return rtc_set_time(rtc, &tm);
33882
33883 case RTC_PIE_ON:
33884 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33885 index ffb5878..e6d785c 100644
33886 --- a/drivers/scsi/aacraid/aacraid.h
33887 +++ b/drivers/scsi/aacraid/aacraid.h
33888 @@ -492,7 +492,7 @@ struct adapter_ops
33889 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33890 /* Administrative operations */
33891 int (*adapter_comm)(struct aac_dev * dev, int comm);
33892 -};
33893 +} __no_const;
33894
33895 /*
33896 * Define which interrupt handler needs to be installed
33897 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33898 index 705e13e..91c873c 100644
33899 --- a/drivers/scsi/aacraid/linit.c
33900 +++ b/drivers/scsi/aacraid/linit.c
33901 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33902 #elif defined(__devinitconst)
33903 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33904 #else
33905 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33906 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33907 #endif
33908 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33909 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33910 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
33911 index d5ff142..49c0ebb 100644
33912 --- a/drivers/scsi/aic94xx/aic94xx_init.c
33913 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
33914 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
33915 .lldd_control_phy = asd_control_phy,
33916 };
33917
33918 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33919 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33920 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33921 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33922 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33923 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
33924 index a796de9..1ef20e1 100644
33925 --- a/drivers/scsi/bfa/bfa.h
33926 +++ b/drivers/scsi/bfa/bfa.h
33927 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
33928 u32 *end);
33929 int cpe_vec_q0;
33930 int rme_vec_q0;
33931 -};
33932 +} __no_const;
33933 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33934
33935 struct bfa_faa_cbfn_s {
33936 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
33937 index e07bd47..cd1bbbb 100644
33938 --- a/drivers/scsi/bfa/bfa_fcpim.c
33939 +++ b/drivers/scsi/bfa/bfa_fcpim.c
33940 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
33941
33942 bfa_iotag_attach(fcp);
33943
33944 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
33945 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
33946 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
33947 (fcp->num_itns * sizeof(struct bfa_itn_s));
33948 memset(fcp->itn_arr, 0,
33949 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33950 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33951 {
33952 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33953 - struct bfa_itn_s *itn;
33954 + bfa_itn_s_no_const *itn;
33955
33956 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33957 itn->isr = isr;
33958 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
33959 index 1080bcb..a3b39e3 100644
33960 --- a/drivers/scsi/bfa/bfa_fcpim.h
33961 +++ b/drivers/scsi/bfa/bfa_fcpim.h
33962 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
33963 struct bfa_itn_s {
33964 bfa_isr_func_t isr;
33965 };
33966 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33967
33968 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33969 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33970 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33971 struct list_head iotag_tio_free_q; /* free IO resources */
33972 struct list_head iotag_unused_q; /* unused IO resources*/
33973 struct bfa_iotag_s *iotag_arr;
33974 - struct bfa_itn_s *itn_arr;
33975 + bfa_itn_s_no_const *itn_arr;
33976 int num_ioim_reqs;
33977 int num_fwtio_reqs;
33978 int num_itns;
33979 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
33980 index 546d46b..642fa5b 100644
33981 --- a/drivers/scsi/bfa/bfa_ioc.h
33982 +++ b/drivers/scsi/bfa/bfa_ioc.h
33983 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33984 bfa_ioc_disable_cbfn_t disable_cbfn;
33985 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33986 bfa_ioc_reset_cbfn_t reset_cbfn;
33987 -};
33988 +} __no_const;
33989
33990 /*
33991 * IOC event notification mechanism.
33992 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33993 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33994 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33995 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33996 -};
33997 +} __no_const;
33998
33999 /*
34000 * Queue element to wait for room in request queue. FIFO order is
34001 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34002 index 351dc0b..951dc32 100644
34003 --- a/drivers/scsi/hosts.c
34004 +++ b/drivers/scsi/hosts.c
34005 @@ -42,7 +42,7 @@
34006 #include "scsi_logging.h"
34007
34008
34009 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34010 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34011
34012
34013 static void scsi_host_cls_release(struct device *dev)
34014 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34015 * subtract one because we increment first then return, but we need to
34016 * know what the next host number was before increment
34017 */
34018 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34019 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34020 shost->dma_channel = 0xff;
34021
34022 /* These three are default values which can be overridden */
34023 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34024 index 865d452..e9b7fa7 100644
34025 --- a/drivers/scsi/hpsa.c
34026 +++ b/drivers/scsi/hpsa.c
34027 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34028 u32 a;
34029
34030 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34031 - return h->access.command_completed(h);
34032 + return h->access->command_completed(h);
34033
34034 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34035 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34036 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34037 while (!list_empty(&h->reqQ)) {
34038 c = list_entry(h->reqQ.next, struct CommandList, list);
34039 /* can't do anything if fifo is full */
34040 - if ((h->access.fifo_full(h))) {
34041 + if ((h->access->fifo_full(h))) {
34042 dev_warn(&h->pdev->dev, "fifo full\n");
34043 break;
34044 }
34045 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34046 h->Qdepth--;
34047
34048 /* Tell the controller execute command */
34049 - h->access.submit_command(h, c);
34050 + h->access->submit_command(h, c);
34051
34052 /* Put job onto the completed Q */
34053 addQ(&h->cmpQ, c);
34054 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34055
34056 static inline unsigned long get_next_completion(struct ctlr_info *h)
34057 {
34058 - return h->access.command_completed(h);
34059 + return h->access->command_completed(h);
34060 }
34061
34062 static inline bool interrupt_pending(struct ctlr_info *h)
34063 {
34064 - return h->access.intr_pending(h);
34065 + return h->access->intr_pending(h);
34066 }
34067
34068 static inline long interrupt_not_for_us(struct ctlr_info *h)
34069 {
34070 - return (h->access.intr_pending(h) == 0) ||
34071 + return (h->access->intr_pending(h) == 0) ||
34072 (h->interrupts_enabled == 0);
34073 }
34074
34075 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34076 if (prod_index < 0)
34077 return -ENODEV;
34078 h->product_name = products[prod_index].product_name;
34079 - h->access = *(products[prod_index].access);
34080 + h->access = products[prod_index].access;
34081
34082 if (hpsa_board_disabled(h->pdev)) {
34083 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34084 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34085
34086 assert_spin_locked(&lockup_detector_lock);
34087 remove_ctlr_from_lockup_detector_list(h);
34088 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34089 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34090 spin_lock_irqsave(&h->lock, flags);
34091 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34092 spin_unlock_irqrestore(&h->lock, flags);
34093 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34094 }
34095
34096 /* make sure the board interrupts are off */
34097 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34098 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34099
34100 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34101 goto clean2;
34102 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34103 * fake ones to scoop up any residual completions.
34104 */
34105 spin_lock_irqsave(&h->lock, flags);
34106 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34107 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34108 spin_unlock_irqrestore(&h->lock, flags);
34109 free_irq(h->intr[h->intr_mode], h);
34110 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34111 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34112 dev_info(&h->pdev->dev, "Board READY.\n");
34113 dev_info(&h->pdev->dev,
34114 "Waiting for stale completions to drain.\n");
34115 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34116 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34117 msleep(10000);
34118 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34119 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34120
34121 rc = controller_reset_failed(h->cfgtable);
34122 if (rc)
34123 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34124 }
34125
34126 /* Turn the interrupts on so we can service requests */
34127 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34128 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34129
34130 hpsa_hba_inquiry(h);
34131 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34132 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34133 * To write all data in the battery backed cache to disks
34134 */
34135 hpsa_flush_cache(h);
34136 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34137 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34138 free_irq(h->intr[h->intr_mode], h);
34139 #ifdef CONFIG_PCI_MSI
34140 if (h->msix_vector)
34141 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34142 return;
34143 }
34144 /* Change the access methods to the performant access methods */
34145 - h->access = SA5_performant_access;
34146 + h->access = &SA5_performant_access;
34147 h->transMethod = CFGTBL_Trans_Performant;
34148 }
34149
34150 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34151 index 91edafb..a9b88ec 100644
34152 --- a/drivers/scsi/hpsa.h
34153 +++ b/drivers/scsi/hpsa.h
34154 @@ -73,7 +73,7 @@ struct ctlr_info {
34155 unsigned int msix_vector;
34156 unsigned int msi_vector;
34157 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34158 - struct access_method access;
34159 + struct access_method *access;
34160
34161 /* queue and queue Info */
34162 struct list_head reqQ;
34163 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34164 index f2df059..a3a9930 100644
34165 --- a/drivers/scsi/ips.h
34166 +++ b/drivers/scsi/ips.h
34167 @@ -1027,7 +1027,7 @@ typedef struct {
34168 int (*intr)(struct ips_ha *);
34169 void (*enableint)(struct ips_ha *);
34170 uint32_t (*statupd)(struct ips_ha *);
34171 -} ips_hw_func_t;
34172 +} __no_const ips_hw_func_t;
34173
34174 typedef struct ips_ha {
34175 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34176 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34177 index 9de9db2..1e09660 100644
34178 --- a/drivers/scsi/libfc/fc_exch.c
34179 +++ b/drivers/scsi/libfc/fc_exch.c
34180 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34181 * all together if not used XXX
34182 */
34183 struct {
34184 - atomic_t no_free_exch;
34185 - atomic_t no_free_exch_xid;
34186 - atomic_t xid_not_found;
34187 - atomic_t xid_busy;
34188 - atomic_t seq_not_found;
34189 - atomic_t non_bls_resp;
34190 + atomic_unchecked_t no_free_exch;
34191 + atomic_unchecked_t no_free_exch_xid;
34192 + atomic_unchecked_t xid_not_found;
34193 + atomic_unchecked_t xid_busy;
34194 + atomic_unchecked_t seq_not_found;
34195 + atomic_unchecked_t non_bls_resp;
34196 } stats;
34197 };
34198
34199 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34200 /* allocate memory for exchange */
34201 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34202 if (!ep) {
34203 - atomic_inc(&mp->stats.no_free_exch);
34204 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34205 goto out;
34206 }
34207 memset(ep, 0, sizeof(*ep));
34208 @@ -780,7 +780,7 @@ out:
34209 return ep;
34210 err:
34211 spin_unlock_bh(&pool->lock);
34212 - atomic_inc(&mp->stats.no_free_exch_xid);
34213 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34214 mempool_free(ep, mp->ep_pool);
34215 return NULL;
34216 }
34217 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34218 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34219 ep = fc_exch_find(mp, xid);
34220 if (!ep) {
34221 - atomic_inc(&mp->stats.xid_not_found);
34222 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34223 reject = FC_RJT_OX_ID;
34224 goto out;
34225 }
34226 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34227 ep = fc_exch_find(mp, xid);
34228 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34229 if (ep) {
34230 - atomic_inc(&mp->stats.xid_busy);
34231 + atomic_inc_unchecked(&mp->stats.xid_busy);
34232 reject = FC_RJT_RX_ID;
34233 goto rel;
34234 }
34235 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34236 }
34237 xid = ep->xid; /* get our XID */
34238 } else if (!ep) {
34239 - atomic_inc(&mp->stats.xid_not_found);
34240 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34241 reject = FC_RJT_RX_ID; /* XID not found */
34242 goto out;
34243 }
34244 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34245 } else {
34246 sp = &ep->seq;
34247 if (sp->id != fh->fh_seq_id) {
34248 - atomic_inc(&mp->stats.seq_not_found);
34249 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34250 if (f_ctl & FC_FC_END_SEQ) {
34251 /*
34252 * Update sequence_id based on incoming last
34253 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34254
34255 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34256 if (!ep) {
34257 - atomic_inc(&mp->stats.xid_not_found);
34258 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34259 goto out;
34260 }
34261 if (ep->esb_stat & ESB_ST_COMPLETE) {
34262 - atomic_inc(&mp->stats.xid_not_found);
34263 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34264 goto rel;
34265 }
34266 if (ep->rxid == FC_XID_UNKNOWN)
34267 ep->rxid = ntohs(fh->fh_rx_id);
34268 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34269 - atomic_inc(&mp->stats.xid_not_found);
34270 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34271 goto rel;
34272 }
34273 if (ep->did != ntoh24(fh->fh_s_id) &&
34274 ep->did != FC_FID_FLOGI) {
34275 - atomic_inc(&mp->stats.xid_not_found);
34276 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34277 goto rel;
34278 }
34279 sof = fr_sof(fp);
34280 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34281 sp->ssb_stat |= SSB_ST_RESP;
34282 sp->id = fh->fh_seq_id;
34283 } else if (sp->id != fh->fh_seq_id) {
34284 - atomic_inc(&mp->stats.seq_not_found);
34285 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34286 goto rel;
34287 }
34288
34289 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34290 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34291
34292 if (!sp)
34293 - atomic_inc(&mp->stats.xid_not_found);
34294 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34295 else
34296 - atomic_inc(&mp->stats.non_bls_resp);
34297 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34298
34299 fc_frame_free(fp);
34300 }
34301 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34302 index db9238f..4378ed2 100644
34303 --- a/drivers/scsi/libsas/sas_ata.c
34304 +++ b/drivers/scsi/libsas/sas_ata.c
34305 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34306 .postreset = ata_std_postreset,
34307 .error_handler = ata_std_error_handler,
34308 .post_internal_cmd = sas_ata_post_internal,
34309 - .qc_defer = ata_std_qc_defer,
34310 + .qc_defer = ata_std_qc_defer,
34311 .qc_prep = ata_noop_qc_prep,
34312 .qc_issue = sas_ata_qc_issue,
34313 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34314 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34315 index bb4c8e0..f33d849 100644
34316 --- a/drivers/scsi/lpfc/lpfc.h
34317 +++ b/drivers/scsi/lpfc/lpfc.h
34318 @@ -425,7 +425,7 @@ struct lpfc_vport {
34319 struct dentry *debug_nodelist;
34320 struct dentry *vport_debugfs_root;
34321 struct lpfc_debugfs_trc *disc_trc;
34322 - atomic_t disc_trc_cnt;
34323 + atomic_unchecked_t disc_trc_cnt;
34324 #endif
34325 uint8_t stat_data_enabled;
34326 uint8_t stat_data_blocked;
34327 @@ -835,8 +835,8 @@ struct lpfc_hba {
34328 struct timer_list fabric_block_timer;
34329 unsigned long bit_flags;
34330 #define FABRIC_COMANDS_BLOCKED 0
34331 - atomic_t num_rsrc_err;
34332 - atomic_t num_cmd_success;
34333 + atomic_unchecked_t num_rsrc_err;
34334 + atomic_unchecked_t num_cmd_success;
34335 unsigned long last_rsrc_error_time;
34336 unsigned long last_ramp_down_time;
34337 unsigned long last_ramp_up_time;
34338 @@ -866,7 +866,7 @@ struct lpfc_hba {
34339
34340 struct dentry *debug_slow_ring_trc;
34341 struct lpfc_debugfs_trc *slow_ring_trc;
34342 - atomic_t slow_ring_trc_cnt;
34343 + atomic_unchecked_t slow_ring_trc_cnt;
34344 /* iDiag debugfs sub-directory */
34345 struct dentry *idiag_root;
34346 struct dentry *idiag_pci_cfg;
34347 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34348 index 2838259..a07cfb5 100644
34349 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34350 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34351 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34352
34353 #include <linux/debugfs.h>
34354
34355 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34356 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34357 static unsigned long lpfc_debugfs_start_time = 0L;
34358
34359 /* iDiag */
34360 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34361 lpfc_debugfs_enable = 0;
34362
34363 len = 0;
34364 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34365 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34366 (lpfc_debugfs_max_disc_trc - 1);
34367 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34368 dtp = vport->disc_trc + i;
34369 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34370 lpfc_debugfs_enable = 0;
34371
34372 len = 0;
34373 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34374 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34375 (lpfc_debugfs_max_slow_ring_trc - 1);
34376 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34377 dtp = phba->slow_ring_trc + i;
34378 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34379 !vport || !vport->disc_trc)
34380 return;
34381
34382 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34383 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34384 (lpfc_debugfs_max_disc_trc - 1);
34385 dtp = vport->disc_trc + index;
34386 dtp->fmt = fmt;
34387 dtp->data1 = data1;
34388 dtp->data2 = data2;
34389 dtp->data3 = data3;
34390 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34391 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34392 dtp->jif = jiffies;
34393 #endif
34394 return;
34395 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34396 !phba || !phba->slow_ring_trc)
34397 return;
34398
34399 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34400 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34401 (lpfc_debugfs_max_slow_ring_trc - 1);
34402 dtp = phba->slow_ring_trc + index;
34403 dtp->fmt = fmt;
34404 dtp->data1 = data1;
34405 dtp->data2 = data2;
34406 dtp->data3 = data3;
34407 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34408 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34409 dtp->jif = jiffies;
34410 #endif
34411 return;
34412 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34413 "slow_ring buffer\n");
34414 goto debug_failed;
34415 }
34416 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34417 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34418 memset(phba->slow_ring_trc, 0,
34419 (sizeof(struct lpfc_debugfs_trc) *
34420 lpfc_debugfs_max_slow_ring_trc));
34421 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34422 "buffer\n");
34423 goto debug_failed;
34424 }
34425 - atomic_set(&vport->disc_trc_cnt, 0);
34426 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34427
34428 snprintf(name, sizeof(name), "discovery_trace");
34429 vport->debug_disc_trc =
34430 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34431 index 55bc4fc..a2a109c 100644
34432 --- a/drivers/scsi/lpfc/lpfc_init.c
34433 +++ b/drivers/scsi/lpfc/lpfc_init.c
34434 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34435 printk(LPFC_COPYRIGHT "\n");
34436
34437 if (lpfc_enable_npiv) {
34438 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34439 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34440 + pax_open_kernel();
34441 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34442 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34443 + pax_close_kernel();
34444 }
34445 lpfc_transport_template =
34446 fc_attach_transport(&lpfc_transport_functions);
34447 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34448 index 2e1e54e..1af0a0d 100644
34449 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34450 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34451 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34452 uint32_t evt_posted;
34453
34454 spin_lock_irqsave(&phba->hbalock, flags);
34455 - atomic_inc(&phba->num_rsrc_err);
34456 + atomic_inc_unchecked(&phba->num_rsrc_err);
34457 phba->last_rsrc_error_time = jiffies;
34458
34459 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34460 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34461 unsigned long flags;
34462 struct lpfc_hba *phba = vport->phba;
34463 uint32_t evt_posted;
34464 - atomic_inc(&phba->num_cmd_success);
34465 + atomic_inc_unchecked(&phba->num_cmd_success);
34466
34467 if (vport->cfg_lun_queue_depth <= queue_depth)
34468 return;
34469 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34470 unsigned long num_rsrc_err, num_cmd_success;
34471 int i;
34472
34473 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34474 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34475 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34476 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34477
34478 vports = lpfc_create_vport_work_array(phba);
34479 if (vports != NULL)
34480 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34481 }
34482 }
34483 lpfc_destroy_vport_work_array(phba, vports);
34484 - atomic_set(&phba->num_rsrc_err, 0);
34485 - atomic_set(&phba->num_cmd_success, 0);
34486 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34487 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34488 }
34489
34490 /**
34491 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34492 }
34493 }
34494 lpfc_destroy_vport_work_array(phba, vports);
34495 - atomic_set(&phba->num_rsrc_err, 0);
34496 - atomic_set(&phba->num_cmd_success, 0);
34497 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34498 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34499 }
34500
34501 /**
34502 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34503 index 5163edb..7b142bc 100644
34504 --- a/drivers/scsi/pmcraid.c
34505 +++ b/drivers/scsi/pmcraid.c
34506 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34507 res->scsi_dev = scsi_dev;
34508 scsi_dev->hostdata = res;
34509 res->change_detected = 0;
34510 - atomic_set(&res->read_failures, 0);
34511 - atomic_set(&res->write_failures, 0);
34512 + atomic_set_unchecked(&res->read_failures, 0);
34513 + atomic_set_unchecked(&res->write_failures, 0);
34514 rc = 0;
34515 }
34516 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34517 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34518
34519 /* If this was a SCSI read/write command keep count of errors */
34520 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34521 - atomic_inc(&res->read_failures);
34522 + atomic_inc_unchecked(&res->read_failures);
34523 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34524 - atomic_inc(&res->write_failures);
34525 + atomic_inc_unchecked(&res->write_failures);
34526
34527 if (!RES_IS_GSCSI(res->cfg_entry) &&
34528 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34529 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34530 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34531 * hrrq_id assigned here in queuecommand
34532 */
34533 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34534 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34535 pinstance->num_hrrq;
34536 cmd->cmd_done = pmcraid_io_done;
34537
34538 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34539 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34540 * hrrq_id assigned here in queuecommand
34541 */
34542 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34543 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34544 pinstance->num_hrrq;
34545
34546 if (request_size) {
34547 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34548
34549 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34550 /* add resources only after host is added into system */
34551 - if (!atomic_read(&pinstance->expose_resources))
34552 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34553 return;
34554
34555 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34556 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34557 init_waitqueue_head(&pinstance->reset_wait_q);
34558
34559 atomic_set(&pinstance->outstanding_cmds, 0);
34560 - atomic_set(&pinstance->last_message_id, 0);
34561 - atomic_set(&pinstance->expose_resources, 0);
34562 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34563 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34564
34565 INIT_LIST_HEAD(&pinstance->free_res_q);
34566 INIT_LIST_HEAD(&pinstance->used_res_q);
34567 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34568 /* Schedule worker thread to handle CCN and take care of adding and
34569 * removing devices to OS
34570 */
34571 - atomic_set(&pinstance->expose_resources, 1);
34572 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34573 schedule_work(&pinstance->worker_q);
34574 return rc;
34575
34576 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34577 index ca496c7..9c791d5 100644
34578 --- a/drivers/scsi/pmcraid.h
34579 +++ b/drivers/scsi/pmcraid.h
34580 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34581 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34582
34583 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34584 - atomic_t last_message_id;
34585 + atomic_unchecked_t last_message_id;
34586
34587 /* configuration table */
34588 struct pmcraid_config_table *cfg_table;
34589 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34590 atomic_t outstanding_cmds;
34591
34592 /* should add/delete resources to mid-layer now ?*/
34593 - atomic_t expose_resources;
34594 + atomic_unchecked_t expose_resources;
34595
34596
34597
34598 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34599 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34600 };
34601 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34602 - atomic_t read_failures; /* count of failed READ commands */
34603 - atomic_t write_failures; /* count of failed WRITE commands */
34604 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34605 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34606
34607 /* To indicate add/delete/modify during CCN */
34608 u8 change_detected;
34609 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34610 index fcf052c..a8025a4 100644
34611 --- a/drivers/scsi/qla2xxx/qla_def.h
34612 +++ b/drivers/scsi/qla2xxx/qla_def.h
34613 @@ -2244,7 +2244,7 @@ struct isp_operations {
34614 int (*get_flash_version) (struct scsi_qla_host *, void *);
34615 int (*start_scsi) (srb_t *);
34616 int (*abort_isp) (struct scsi_qla_host *);
34617 -};
34618 +} __no_const;
34619
34620 /* MSI-X Support *************************************************************/
34621
34622 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34623 index fd5edc6..4906148 100644
34624 --- a/drivers/scsi/qla4xxx/ql4_def.h
34625 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34626 @@ -258,7 +258,7 @@ struct ddb_entry {
34627 * (4000 only) */
34628 atomic_t relogin_timer; /* Max Time to wait for
34629 * relogin to complete */
34630 - atomic_t relogin_retry_count; /* Num of times relogin has been
34631 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34632 * retried */
34633 uint32_t default_time2wait; /* Default Min time between
34634 * relogins (+aens) */
34635 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34636 index 4169c8b..a8b896b 100644
34637 --- a/drivers/scsi/qla4xxx/ql4_os.c
34638 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34639 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34640 */
34641 if (!iscsi_is_session_online(cls_sess)) {
34642 /* Reset retry relogin timer */
34643 - atomic_inc(&ddb_entry->relogin_retry_count);
34644 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34645 DEBUG2(ql4_printk(KERN_INFO, ha,
34646 "%s: index[%d] relogin timed out-retrying"
34647 " relogin (%d), retry (%d)\n", __func__,
34648 ddb_entry->fw_ddb_index,
34649 - atomic_read(&ddb_entry->relogin_retry_count),
34650 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34651 ddb_entry->default_time2wait + 4));
34652 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34653 atomic_set(&ddb_entry->retry_relogin_timer,
34654 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34655
34656 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34657 atomic_set(&ddb_entry->relogin_timer, 0);
34658 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34659 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34660
34661 ddb_entry->default_relogin_timeout =
34662 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34663 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34664 index 2aeb2e9..46e3925 100644
34665 --- a/drivers/scsi/scsi.c
34666 +++ b/drivers/scsi/scsi.c
34667 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34668 unsigned long timeout;
34669 int rtn = 0;
34670
34671 - atomic_inc(&cmd->device->iorequest_cnt);
34672 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34673
34674 /* check if the device is still usable */
34675 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34676 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34677 index f85cfa6..a57c9e8 100644
34678 --- a/drivers/scsi/scsi_lib.c
34679 +++ b/drivers/scsi/scsi_lib.c
34680 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34681 shost = sdev->host;
34682 scsi_init_cmd_errh(cmd);
34683 cmd->result = DID_NO_CONNECT << 16;
34684 - atomic_inc(&cmd->device->iorequest_cnt);
34685 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34686
34687 /*
34688 * SCSI request completion path will do scsi_device_unbusy(),
34689 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34690
34691 INIT_LIST_HEAD(&cmd->eh_entry);
34692
34693 - atomic_inc(&cmd->device->iodone_cnt);
34694 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34695 if (cmd->result)
34696 - atomic_inc(&cmd->device->ioerr_cnt);
34697 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34698
34699 disposition = scsi_decide_disposition(cmd);
34700 if (disposition != SUCCESS &&
34701 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34702 index 04c2a27..9d8bd66 100644
34703 --- a/drivers/scsi/scsi_sysfs.c
34704 +++ b/drivers/scsi/scsi_sysfs.c
34705 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34706 char *buf) \
34707 { \
34708 struct scsi_device *sdev = to_scsi_device(dev); \
34709 - unsigned long long count = atomic_read(&sdev->field); \
34710 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34711 return snprintf(buf, 20, "0x%llx\n", count); \
34712 } \
34713 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34714 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34715 index 84a1fdf..693b0d6 100644
34716 --- a/drivers/scsi/scsi_tgt_lib.c
34717 +++ b/drivers/scsi/scsi_tgt_lib.c
34718 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34719 int err;
34720
34721 dprintk("%lx %u\n", uaddr, len);
34722 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34723 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34724 if (err) {
34725 /*
34726 * TODO: need to fixup sg_tablesize, max_segment_size,
34727 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34728 index 1b21491..1b7f60e 100644
34729 --- a/drivers/scsi/scsi_transport_fc.c
34730 +++ b/drivers/scsi/scsi_transport_fc.c
34731 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34732 * Netlink Infrastructure
34733 */
34734
34735 -static atomic_t fc_event_seq;
34736 +static atomic_unchecked_t fc_event_seq;
34737
34738 /**
34739 * fc_get_event_number - Obtain the next sequential FC event number
34740 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34741 u32
34742 fc_get_event_number(void)
34743 {
34744 - return atomic_add_return(1, &fc_event_seq);
34745 + return atomic_add_return_unchecked(1, &fc_event_seq);
34746 }
34747 EXPORT_SYMBOL(fc_get_event_number);
34748
34749 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34750 {
34751 int error;
34752
34753 - atomic_set(&fc_event_seq, 0);
34754 + atomic_set_unchecked(&fc_event_seq, 0);
34755
34756 error = transport_class_register(&fc_host_class);
34757 if (error)
34758 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34759 char *cp;
34760
34761 *val = simple_strtoul(buf, &cp, 0);
34762 - if ((*cp && (*cp != '\n')) || (*val < 0))
34763 + if (*cp && (*cp != '\n'))
34764 return -EINVAL;
34765 /*
34766 * Check for overflow; dev_loss_tmo is u32
34767 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34768 index 96029e6..4d77fa0 100644
34769 --- a/drivers/scsi/scsi_transport_iscsi.c
34770 +++ b/drivers/scsi/scsi_transport_iscsi.c
34771 @@ -79,7 +79,7 @@ struct iscsi_internal {
34772 struct transport_container session_cont;
34773 };
34774
34775 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34776 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34777 static struct workqueue_struct *iscsi_eh_timer_workq;
34778
34779 static DEFINE_IDA(iscsi_sess_ida);
34780 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34781 int err;
34782
34783 ihost = shost->shost_data;
34784 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34785 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34786
34787 if (target_id == ISCSI_MAX_TARGET) {
34788 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34789 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34790 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34791 ISCSI_TRANSPORT_VERSION);
34792
34793 - atomic_set(&iscsi_session_nr, 0);
34794 + atomic_set_unchecked(&iscsi_session_nr, 0);
34795
34796 err = class_register(&iscsi_transport_class);
34797 if (err)
34798 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34799 index 21a045e..ec89e03 100644
34800 --- a/drivers/scsi/scsi_transport_srp.c
34801 +++ b/drivers/scsi/scsi_transport_srp.c
34802 @@ -33,7 +33,7 @@
34803 #include "scsi_transport_srp_internal.h"
34804
34805 struct srp_host_attrs {
34806 - atomic_t next_port_id;
34807 + atomic_unchecked_t next_port_id;
34808 };
34809 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34810
34811 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34812 struct Scsi_Host *shost = dev_to_shost(dev);
34813 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34814
34815 - atomic_set(&srp_host->next_port_id, 0);
34816 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34817 return 0;
34818 }
34819
34820 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34821 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34822 rport->roles = ids->roles;
34823
34824 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34825 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34826 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34827
34828 transport_setup_device(&rport->dev);
34829 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34830 index 441a1c5..07cece7 100644
34831 --- a/drivers/scsi/sg.c
34832 +++ b/drivers/scsi/sg.c
34833 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34834 sdp->disk->disk_name,
34835 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34836 NULL,
34837 - (char *)arg);
34838 + (char __user *)arg);
34839 case BLKTRACESTART:
34840 return blk_trace_startstop(sdp->device->request_queue, 1);
34841 case BLKTRACESTOP:
34842 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34843 const struct file_operations * fops;
34844 };
34845
34846 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34847 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34848 {"allow_dio", &adio_fops},
34849 {"debug", &debug_fops},
34850 {"def_reserved_size", &dressz_fops},
34851 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
34852 {
34853 int k, mask;
34854 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34855 - struct sg_proc_leaf * leaf;
34856 + const struct sg_proc_leaf * leaf;
34857
34858 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34859 if (!sg_proc_sgp)
34860 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34861 index f64250e..1ee3049 100644
34862 --- a/drivers/spi/spi-dw-pci.c
34863 +++ b/drivers/spi/spi-dw-pci.c
34864 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34865 #define spi_resume NULL
34866 #endif
34867
34868 -static const struct pci_device_id pci_ids[] __devinitdata = {
34869 +static const struct pci_device_id pci_ids[] __devinitconst = {
34870 /* Intel MID platform SPI controller 0 */
34871 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34872 {},
34873 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34874 index 77eae99..b7cdcc9 100644
34875 --- a/drivers/spi/spi.c
34876 +++ b/drivers/spi/spi.c
34877 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34878 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34879
34880 /* portable code must never pass more than 32 bytes */
34881 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34882 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34883
34884 static u8 *buf;
34885
34886 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34887 index 436fe97..4082570 100644
34888 --- a/drivers/staging/gma500/power.c
34889 +++ b/drivers/staging/gma500/power.c
34890 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34891 ret = gma_resume_pci(dev->pdev);
34892 if (ret == 0) {
34893 /* FIXME: we want to defer this for Medfield/Oaktrail */
34894 - gma_resume_display(dev);
34895 + gma_resume_display(dev->pdev);
34896 psb_irq_preinstall(dev);
34897 psb_irq_postinstall(dev);
34898 pm_runtime_get(&dev->pdev->dev);
34899 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34900 index bafccb3..e3ac78d 100644
34901 --- a/drivers/staging/hv/rndis_filter.c
34902 +++ b/drivers/staging/hv/rndis_filter.c
34903 @@ -42,7 +42,7 @@ struct rndis_device {
34904
34905 enum rndis_device_state state;
34906 bool link_state;
34907 - atomic_t new_req_id;
34908 + atomic_unchecked_t new_req_id;
34909
34910 spinlock_t request_lock;
34911 struct list_head req_list;
34912 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34913 * template
34914 */
34915 set = &rndis_msg->msg.set_req;
34916 - set->req_id = atomic_inc_return(&dev->new_req_id);
34917 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34918
34919 /* Add to the request list */
34920 spin_lock_irqsave(&dev->request_lock, flags);
34921 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34922
34923 /* Setup the rndis set */
34924 halt = &request->request_msg.msg.halt_req;
34925 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34926 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34927
34928 /* Ignore return since this msg is optional. */
34929 rndis_filter_send_request(dev, request);
34930 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
34931 index 9e8f010..af9efb5 100644
34932 --- a/drivers/staging/iio/buffer_generic.h
34933 +++ b/drivers/staging/iio/buffer_generic.h
34934 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
34935
34936 int (*is_enabled)(struct iio_buffer *buffer);
34937 int (*enable)(struct iio_buffer *buffer);
34938 -};
34939 +} __no_const;
34940
34941 /**
34942 * struct iio_buffer_setup_ops - buffer setup related callbacks
34943 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
34944 index 8b307b4..a97ac91 100644
34945 --- a/drivers/staging/octeon/ethernet-rx.c
34946 +++ b/drivers/staging/octeon/ethernet-rx.c
34947 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34948 /* Increment RX stats for virtual ports */
34949 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34950 #ifdef CONFIG_64BIT
34951 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34952 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34953 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34954 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34955 #else
34956 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34957 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34958 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34959 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34960 #endif
34961 }
34962 netif_receive_skb(skb);
34963 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34964 dev->name);
34965 */
34966 #ifdef CONFIG_64BIT
34967 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34968 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34969 #else
34970 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34971 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34972 #endif
34973 dev_kfree_skb_irq(skb);
34974 }
34975 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
34976 index 076f866..2308070 100644
34977 --- a/drivers/staging/octeon/ethernet.c
34978 +++ b/drivers/staging/octeon/ethernet.c
34979 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
34980 * since the RX tasklet also increments it.
34981 */
34982 #ifdef CONFIG_64BIT
34983 - atomic64_add(rx_status.dropped_packets,
34984 - (atomic64_t *)&priv->stats.rx_dropped);
34985 + atomic64_add_unchecked(rx_status.dropped_packets,
34986 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34987 #else
34988 - atomic_add(rx_status.dropped_packets,
34989 - (atomic_t *)&priv->stats.rx_dropped);
34990 + atomic_add_unchecked(rx_status.dropped_packets,
34991 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34992 #endif
34993 }
34994
34995 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
34996 index 7a19555..466456d 100644
34997 --- a/drivers/staging/pohmelfs/inode.c
34998 +++ b/drivers/staging/pohmelfs/inode.c
34999 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35000 mutex_init(&psb->mcache_lock);
35001 psb->mcache_root = RB_ROOT;
35002 psb->mcache_timeout = msecs_to_jiffies(5000);
35003 - atomic_long_set(&psb->mcache_gen, 0);
35004 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35005
35006 psb->trans_max_pages = 100;
35007
35008 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35009 INIT_LIST_HEAD(&psb->crypto_ready_list);
35010 INIT_LIST_HEAD(&psb->crypto_active_list);
35011
35012 - atomic_set(&psb->trans_gen, 1);
35013 + atomic_set_unchecked(&psb->trans_gen, 1);
35014 atomic_long_set(&psb->total_inodes, 0);
35015
35016 mutex_init(&psb->state_lock);
35017 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35018 index e22665c..a2a9390 100644
35019 --- a/drivers/staging/pohmelfs/mcache.c
35020 +++ b/drivers/staging/pohmelfs/mcache.c
35021 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35022 m->data = data;
35023 m->start = start;
35024 m->size = size;
35025 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35026 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35027
35028 mutex_lock(&psb->mcache_lock);
35029 err = pohmelfs_mcache_insert(psb, m);
35030 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35031 index 985b6b7..7699e05 100644
35032 --- a/drivers/staging/pohmelfs/netfs.h
35033 +++ b/drivers/staging/pohmelfs/netfs.h
35034 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35035 struct pohmelfs_sb {
35036 struct rb_root mcache_root;
35037 struct mutex mcache_lock;
35038 - atomic_long_t mcache_gen;
35039 + atomic_long_unchecked_t mcache_gen;
35040 unsigned long mcache_timeout;
35041
35042 unsigned int idx;
35043
35044 unsigned int trans_retries;
35045
35046 - atomic_t trans_gen;
35047 + atomic_unchecked_t trans_gen;
35048
35049 unsigned int crypto_attached_size;
35050 unsigned int crypto_align_size;
35051 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35052 index 06c1a74..866eebc 100644
35053 --- a/drivers/staging/pohmelfs/trans.c
35054 +++ b/drivers/staging/pohmelfs/trans.c
35055 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35056 int err;
35057 struct netfs_cmd *cmd = t->iovec.iov_base;
35058
35059 - t->gen = atomic_inc_return(&psb->trans_gen);
35060 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35061
35062 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35063 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35064 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35065 index 86308a0..feaa925 100644
35066 --- a/drivers/staging/rtl8712/rtl871x_io.h
35067 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35068 @@ -108,7 +108,7 @@ struct _io_ops {
35069 u8 *pmem);
35070 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35071 u8 *pmem);
35072 -};
35073 +} __no_const;
35074
35075 struct io_req {
35076 struct list_head list;
35077 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35078 index c7b5e8b..783d6cb 100644
35079 --- a/drivers/staging/sbe-2t3e3/netdev.c
35080 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35081 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35082 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35083
35084 if (rlen)
35085 - if (copy_to_user(data, &resp, rlen))
35086 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35087 return -EFAULT;
35088
35089 return 0;
35090 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35091 index be21617..0954e45 100644
35092 --- a/drivers/staging/usbip/usbip_common.h
35093 +++ b/drivers/staging/usbip/usbip_common.h
35094 @@ -289,7 +289,7 @@ struct usbip_device {
35095 void (*shutdown)(struct usbip_device *);
35096 void (*reset)(struct usbip_device *);
35097 void (*unusable)(struct usbip_device *);
35098 - } eh_ops;
35099 + } __no_const eh_ops;
35100 };
35101
35102 #if 0
35103 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35104 index 88b3298..3783eee 100644
35105 --- a/drivers/staging/usbip/vhci.h
35106 +++ b/drivers/staging/usbip/vhci.h
35107 @@ -88,7 +88,7 @@ struct vhci_hcd {
35108 unsigned resuming:1;
35109 unsigned long re_timeout;
35110
35111 - atomic_t seqnum;
35112 + atomic_unchecked_t seqnum;
35113
35114 /*
35115 * NOTE:
35116 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35117 index 2ee97e2..0420b86 100644
35118 --- a/drivers/staging/usbip/vhci_hcd.c
35119 +++ b/drivers/staging/usbip/vhci_hcd.c
35120 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35121 return;
35122 }
35123
35124 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35125 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35126 if (priv->seqnum == 0xffff)
35127 dev_info(&urb->dev->dev, "seqnum max\n");
35128
35129 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35130 return -ENOMEM;
35131 }
35132
35133 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35134 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35135 if (unlink->seqnum == 0xffff)
35136 pr_info("seqnum max\n");
35137
35138 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35139 vdev->rhport = rhport;
35140 }
35141
35142 - atomic_set(&vhci->seqnum, 0);
35143 + atomic_set_unchecked(&vhci->seqnum, 0);
35144 spin_lock_init(&vhci->lock);
35145
35146 hcd->power_budget = 0; /* no limit */
35147 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35148 index 3872b8c..fe6d2f4 100644
35149 --- a/drivers/staging/usbip/vhci_rx.c
35150 +++ b/drivers/staging/usbip/vhci_rx.c
35151 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35152 if (!urb) {
35153 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35154 pr_info("max seqnum %d\n",
35155 - atomic_read(&the_controller->seqnum));
35156 + atomic_read_unchecked(&the_controller->seqnum));
35157 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35158 return;
35159 }
35160 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35161 index 7735027..30eed13 100644
35162 --- a/drivers/staging/vt6655/hostap.c
35163 +++ b/drivers/staging/vt6655/hostap.c
35164 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35165 *
35166 */
35167
35168 +static net_device_ops_no_const apdev_netdev_ops;
35169 +
35170 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35171 {
35172 PSDevice apdev_priv;
35173 struct net_device *dev = pDevice->dev;
35174 int ret;
35175 - const struct net_device_ops apdev_netdev_ops = {
35176 - .ndo_start_xmit = pDevice->tx_80211,
35177 - };
35178
35179 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35180
35181 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35182 *apdev_priv = *pDevice;
35183 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35184
35185 + /* only half broken now */
35186 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35187 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35188
35189 pDevice->apdev->type = ARPHRD_IEEE80211;
35190 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35191 index 51b5adf..098e320 100644
35192 --- a/drivers/staging/vt6656/hostap.c
35193 +++ b/drivers/staging/vt6656/hostap.c
35194 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35195 *
35196 */
35197
35198 +static net_device_ops_no_const apdev_netdev_ops;
35199 +
35200 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35201 {
35202 PSDevice apdev_priv;
35203 struct net_device *dev = pDevice->dev;
35204 int ret;
35205 - const struct net_device_ops apdev_netdev_ops = {
35206 - .ndo_start_xmit = pDevice->tx_80211,
35207 - };
35208
35209 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35210
35211 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35212 *apdev_priv = *pDevice;
35213 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35214
35215 + /* only half broken now */
35216 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35217 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35218
35219 pDevice->apdev->type = ARPHRD_IEEE80211;
35220 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35221 index 7843dfd..3db105f 100644
35222 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35223 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35224 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35225
35226 struct usbctlx_completor {
35227 int (*complete) (struct usbctlx_completor *);
35228 -};
35229 +} __no_const;
35230
35231 static int
35232 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35233 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35234 index 1ca66ea..76f1343 100644
35235 --- a/drivers/staging/zcache/tmem.c
35236 +++ b/drivers/staging/zcache/tmem.c
35237 @@ -39,7 +39,7 @@
35238 * A tmem host implementation must use this function to register callbacks
35239 * for memory allocation.
35240 */
35241 -static struct tmem_hostops tmem_hostops;
35242 +static tmem_hostops_no_const tmem_hostops;
35243
35244 static void tmem_objnode_tree_init(void);
35245
35246 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35247 * A tmem host implementation must use this function to register
35248 * callbacks for a page-accessible memory (PAM) implementation
35249 */
35250 -static struct tmem_pamops tmem_pamops;
35251 +static tmem_pamops_no_const tmem_pamops;
35252
35253 void tmem_register_pamops(struct tmem_pamops *m)
35254 {
35255 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35256 index ed147c4..94fc3c6 100644
35257 --- a/drivers/staging/zcache/tmem.h
35258 +++ b/drivers/staging/zcache/tmem.h
35259 @@ -180,6 +180,7 @@ struct tmem_pamops {
35260 void (*new_obj)(struct tmem_obj *);
35261 int (*replace_in_obj)(void *, struct tmem_obj *);
35262 };
35263 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35264 extern void tmem_register_pamops(struct tmem_pamops *m);
35265
35266 /* memory allocation methods provided by the host implementation */
35267 @@ -189,6 +190,7 @@ struct tmem_hostops {
35268 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35269 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35270 };
35271 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35272 extern void tmem_register_hostops(struct tmem_hostops *m);
35273
35274 /* core tmem accessor functions */
35275 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35276 index 8599545..7761358 100644
35277 --- a/drivers/target/iscsi/iscsi_target.c
35278 +++ b/drivers/target/iscsi/iscsi_target.c
35279 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35280 * outstanding_r2ts reaches zero, go ahead and send the delayed
35281 * TASK_ABORTED status.
35282 */
35283 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35284 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35285 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35286 if (--cmd->outstanding_r2ts < 1) {
35287 iscsit_stop_dataout_timer(cmd);
35288 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35289 index 6845228..df77141 100644
35290 --- a/drivers/target/target_core_tmr.c
35291 +++ b/drivers/target/target_core_tmr.c
35292 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35293 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35294 cmd->t_task_list_num,
35295 atomic_read(&cmd->t_task_cdbs_left),
35296 - atomic_read(&cmd->t_task_cdbs_sent),
35297 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35298 atomic_read(&cmd->t_transport_active),
35299 atomic_read(&cmd->t_transport_stop),
35300 atomic_read(&cmd->t_transport_sent));
35301 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35302 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35303 " task: %p, t_fe_count: %d dev: %p\n", task,
35304 fe_count, dev);
35305 - atomic_set(&cmd->t_transport_aborted, 1);
35306 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35307 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35308
35309 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35310 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35311 }
35312 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35313 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35314 - atomic_set(&cmd->t_transport_aborted, 1);
35315 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35316 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35317
35318 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35319 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35320 index e87d0eb..856cbcc 100644
35321 --- a/drivers/target/target_core_transport.c
35322 +++ b/drivers/target/target_core_transport.c
35323 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35324
35325 dev->queue_depth = dev_limits->queue_depth;
35326 atomic_set(&dev->depth_left, dev->queue_depth);
35327 - atomic_set(&dev->dev_ordered_id, 0);
35328 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35329
35330 se_dev_set_default_attribs(dev, dev_limits);
35331
35332 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35333 * Used to determine when ORDERED commands should go from
35334 * Dormant to Active status.
35335 */
35336 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35337 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35338 smp_mb__after_atomic_inc();
35339 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35340 cmd->se_ordered_id, cmd->sam_task_attr,
35341 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35342 " t_transport_active: %d t_transport_stop: %d"
35343 " t_transport_sent: %d\n", cmd->t_task_list_num,
35344 atomic_read(&cmd->t_task_cdbs_left),
35345 - atomic_read(&cmd->t_task_cdbs_sent),
35346 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35347 atomic_read(&cmd->t_task_cdbs_ex_left),
35348 atomic_read(&cmd->t_transport_active),
35349 atomic_read(&cmd->t_transport_stop),
35350 @@ -2089,9 +2089,9 @@ check_depth:
35351
35352 spin_lock_irqsave(&cmd->t_state_lock, flags);
35353 task->task_flags |= (TF_ACTIVE | TF_SENT);
35354 - atomic_inc(&cmd->t_task_cdbs_sent);
35355 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35356
35357 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35358 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35359 cmd->t_task_list_num)
35360 atomic_set(&cmd->t_transport_sent, 1);
35361
35362 @@ -4260,7 +4260,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35363 atomic_set(&cmd->transport_lun_stop, 0);
35364 }
35365 if (!atomic_read(&cmd->t_transport_active) ||
35366 - atomic_read(&cmd->t_transport_aborted)) {
35367 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35368 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35369 return false;
35370 }
35371 @@ -4509,7 +4509,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35372 {
35373 int ret = 0;
35374
35375 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35376 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35377 if (!send_status ||
35378 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35379 return 1;
35380 @@ -4546,7 +4546,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35381 */
35382 if (cmd->data_direction == DMA_TO_DEVICE) {
35383 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35384 - atomic_inc(&cmd->t_transport_aborted);
35385 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35386 smp_mb__after_atomic_inc();
35387 }
35388 }
35389 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35390 index b9040be..e3f5aab 100644
35391 --- a/drivers/tty/hvc/hvcs.c
35392 +++ b/drivers/tty/hvc/hvcs.c
35393 @@ -83,6 +83,7 @@
35394 #include <asm/hvcserver.h>
35395 #include <asm/uaccess.h>
35396 #include <asm/vio.h>
35397 +#include <asm/local.h>
35398
35399 /*
35400 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35401 @@ -270,7 +271,7 @@ struct hvcs_struct {
35402 unsigned int index;
35403
35404 struct tty_struct *tty;
35405 - int open_count;
35406 + local_t open_count;
35407
35408 /*
35409 * Used to tell the driver kernel_thread what operations need to take
35410 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35411
35412 spin_lock_irqsave(&hvcsd->lock, flags);
35413
35414 - if (hvcsd->open_count > 0) {
35415 + if (local_read(&hvcsd->open_count) > 0) {
35416 spin_unlock_irqrestore(&hvcsd->lock, flags);
35417 printk(KERN_INFO "HVCS: vterm state unchanged. "
35418 "The hvcs device node is still in use.\n");
35419 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35420 if ((retval = hvcs_partner_connect(hvcsd)))
35421 goto error_release;
35422
35423 - hvcsd->open_count = 1;
35424 + local_set(&hvcsd->open_count, 1);
35425 hvcsd->tty = tty;
35426 tty->driver_data = hvcsd;
35427
35428 @@ -1179,7 +1180,7 @@ fast_open:
35429
35430 spin_lock_irqsave(&hvcsd->lock, flags);
35431 kref_get(&hvcsd->kref);
35432 - hvcsd->open_count++;
35433 + local_inc(&hvcsd->open_count);
35434 hvcsd->todo_mask |= HVCS_SCHED_READ;
35435 spin_unlock_irqrestore(&hvcsd->lock, flags);
35436
35437 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35438 hvcsd = tty->driver_data;
35439
35440 spin_lock_irqsave(&hvcsd->lock, flags);
35441 - if (--hvcsd->open_count == 0) {
35442 + if (local_dec_and_test(&hvcsd->open_count)) {
35443
35444 vio_disable_interrupts(hvcsd->vdev);
35445
35446 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35447 free_irq(irq, hvcsd);
35448 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35449 return;
35450 - } else if (hvcsd->open_count < 0) {
35451 + } else if (local_read(&hvcsd->open_count) < 0) {
35452 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35453 " is missmanaged.\n",
35454 - hvcsd->vdev->unit_address, hvcsd->open_count);
35455 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35456 }
35457
35458 spin_unlock_irqrestore(&hvcsd->lock, flags);
35459 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35460
35461 spin_lock_irqsave(&hvcsd->lock, flags);
35462 /* Preserve this so that we know how many kref refs to put */
35463 - temp_open_count = hvcsd->open_count;
35464 + temp_open_count = local_read(&hvcsd->open_count);
35465
35466 /*
35467 * Don't kref put inside the spinlock because the destruction
35468 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35469 hvcsd->tty->driver_data = NULL;
35470 hvcsd->tty = NULL;
35471
35472 - hvcsd->open_count = 0;
35473 + local_set(&hvcsd->open_count, 0);
35474
35475 /* This will drop any buffered data on the floor which is OK in a hangup
35476 * scenario. */
35477 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35478 * the middle of a write operation? This is a crummy place to do this
35479 * but we want to keep it all in the spinlock.
35480 */
35481 - if (hvcsd->open_count <= 0) {
35482 + if (local_read(&hvcsd->open_count) <= 0) {
35483 spin_unlock_irqrestore(&hvcsd->lock, flags);
35484 return -ENODEV;
35485 }
35486 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35487 {
35488 struct hvcs_struct *hvcsd = tty->driver_data;
35489
35490 - if (!hvcsd || hvcsd->open_count <= 0)
35491 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35492 return 0;
35493
35494 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35495 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35496 index ef92869..f4ebd88 100644
35497 --- a/drivers/tty/ipwireless/tty.c
35498 +++ b/drivers/tty/ipwireless/tty.c
35499 @@ -29,6 +29,7 @@
35500 #include <linux/tty_driver.h>
35501 #include <linux/tty_flip.h>
35502 #include <linux/uaccess.h>
35503 +#include <asm/local.h>
35504
35505 #include "tty.h"
35506 #include "network.h"
35507 @@ -51,7 +52,7 @@ struct ipw_tty {
35508 int tty_type;
35509 struct ipw_network *network;
35510 struct tty_struct *linux_tty;
35511 - int open_count;
35512 + local_t open_count;
35513 unsigned int control_lines;
35514 struct mutex ipw_tty_mutex;
35515 int tx_bytes_queued;
35516 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35517 mutex_unlock(&tty->ipw_tty_mutex);
35518 return -ENODEV;
35519 }
35520 - if (tty->open_count == 0)
35521 + if (local_read(&tty->open_count) == 0)
35522 tty->tx_bytes_queued = 0;
35523
35524 - tty->open_count++;
35525 + local_inc(&tty->open_count);
35526
35527 tty->linux_tty = linux_tty;
35528 linux_tty->driver_data = tty;
35529 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35530
35531 static void do_ipw_close(struct ipw_tty *tty)
35532 {
35533 - tty->open_count--;
35534 -
35535 - if (tty->open_count == 0) {
35536 + if (local_dec_return(&tty->open_count) == 0) {
35537 struct tty_struct *linux_tty = tty->linux_tty;
35538
35539 if (linux_tty != NULL) {
35540 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35541 return;
35542
35543 mutex_lock(&tty->ipw_tty_mutex);
35544 - if (tty->open_count == 0) {
35545 + if (local_read(&tty->open_count) == 0) {
35546 mutex_unlock(&tty->ipw_tty_mutex);
35547 return;
35548 }
35549 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35550 return;
35551 }
35552
35553 - if (!tty->open_count) {
35554 + if (!local_read(&tty->open_count)) {
35555 mutex_unlock(&tty->ipw_tty_mutex);
35556 return;
35557 }
35558 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35559 return -ENODEV;
35560
35561 mutex_lock(&tty->ipw_tty_mutex);
35562 - if (!tty->open_count) {
35563 + if (!local_read(&tty->open_count)) {
35564 mutex_unlock(&tty->ipw_tty_mutex);
35565 return -EINVAL;
35566 }
35567 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35568 if (!tty)
35569 return -ENODEV;
35570
35571 - if (!tty->open_count)
35572 + if (!local_read(&tty->open_count))
35573 return -EINVAL;
35574
35575 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35576 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35577 if (!tty)
35578 return 0;
35579
35580 - if (!tty->open_count)
35581 + if (!local_read(&tty->open_count))
35582 return 0;
35583
35584 return tty->tx_bytes_queued;
35585 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35586 if (!tty)
35587 return -ENODEV;
35588
35589 - if (!tty->open_count)
35590 + if (!local_read(&tty->open_count))
35591 return -EINVAL;
35592
35593 return get_control_lines(tty);
35594 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35595 if (!tty)
35596 return -ENODEV;
35597
35598 - if (!tty->open_count)
35599 + if (!local_read(&tty->open_count))
35600 return -EINVAL;
35601
35602 return set_control_lines(tty, set, clear);
35603 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35604 if (!tty)
35605 return -ENODEV;
35606
35607 - if (!tty->open_count)
35608 + if (!local_read(&tty->open_count))
35609 return -EINVAL;
35610
35611 /* FIXME: Exactly how is the tty object locked here .. */
35612 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35613 against a parallel ioctl etc */
35614 mutex_lock(&ttyj->ipw_tty_mutex);
35615 }
35616 - while (ttyj->open_count)
35617 + while (local_read(&ttyj->open_count))
35618 do_ipw_close(ttyj);
35619 ipwireless_disassociate_network_ttys(network,
35620 ttyj->channel_idx);
35621 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35622 index fc7bbba..9527e93 100644
35623 --- a/drivers/tty/n_gsm.c
35624 +++ b/drivers/tty/n_gsm.c
35625 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35626 kref_init(&dlci->ref);
35627 mutex_init(&dlci->mutex);
35628 dlci->fifo = &dlci->_fifo;
35629 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35630 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35631 kfree(dlci);
35632 return NULL;
35633 }
35634 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35635 index 39d6ab6..eb97f41 100644
35636 --- a/drivers/tty/n_tty.c
35637 +++ b/drivers/tty/n_tty.c
35638 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35639 {
35640 *ops = tty_ldisc_N_TTY;
35641 ops->owner = NULL;
35642 - ops->refcount = ops->flags = 0;
35643 + atomic_set(&ops->refcount, 0);
35644 + ops->flags = 0;
35645 }
35646 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35647 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35648 index e18604b..a7d5a11 100644
35649 --- a/drivers/tty/pty.c
35650 +++ b/drivers/tty/pty.c
35651 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35652 register_sysctl_table(pty_root_table);
35653
35654 /* Now create the /dev/ptmx special device */
35655 + pax_open_kernel();
35656 tty_default_fops(&ptmx_fops);
35657 - ptmx_fops.open = ptmx_open;
35658 + *(void **)&ptmx_fops.open = ptmx_open;
35659 + pax_close_kernel();
35660
35661 cdev_init(&ptmx_cdev, &ptmx_fops);
35662 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35663 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35664 index 2b42a01..32a2ed3 100644
35665 --- a/drivers/tty/serial/kgdboc.c
35666 +++ b/drivers/tty/serial/kgdboc.c
35667 @@ -24,8 +24,9 @@
35668 #define MAX_CONFIG_LEN 40
35669
35670 static struct kgdb_io kgdboc_io_ops;
35671 +static struct kgdb_io kgdboc_io_ops_console;
35672
35673 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35674 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35675 static int configured = -1;
35676
35677 static char config[MAX_CONFIG_LEN];
35678 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35679 kgdboc_unregister_kbd();
35680 if (configured == 1)
35681 kgdb_unregister_io_module(&kgdboc_io_ops);
35682 + else if (configured == 2)
35683 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35684 }
35685
35686 static int configure_kgdboc(void)
35687 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35688 int err;
35689 char *cptr = config;
35690 struct console *cons;
35691 + int is_console = 0;
35692
35693 err = kgdboc_option_setup(config);
35694 if (err || !strlen(config) || isspace(config[0]))
35695 goto noconfig;
35696
35697 err = -ENODEV;
35698 - kgdboc_io_ops.is_console = 0;
35699 kgdb_tty_driver = NULL;
35700
35701 kgdboc_use_kms = 0;
35702 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35703 int idx;
35704 if (cons->device && cons->device(cons, &idx) == p &&
35705 idx == tty_line) {
35706 - kgdboc_io_ops.is_console = 1;
35707 + is_console = 1;
35708 break;
35709 }
35710 cons = cons->next;
35711 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35712 kgdb_tty_line = tty_line;
35713
35714 do_register:
35715 - err = kgdb_register_io_module(&kgdboc_io_ops);
35716 + if (is_console) {
35717 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
35718 + configured = 2;
35719 + } else {
35720 + err = kgdb_register_io_module(&kgdboc_io_ops);
35721 + configured = 1;
35722 + }
35723 if (err)
35724 goto noconfig;
35725
35726 - configured = 1;
35727 -
35728 return 0;
35729
35730 noconfig:
35731 @@ -213,7 +220,7 @@ noconfig:
35732 static int __init init_kgdboc(void)
35733 {
35734 /* Already configured? */
35735 - if (configured == 1)
35736 + if (configured >= 1)
35737 return 0;
35738
35739 return configure_kgdboc();
35740 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35741 if (config[len - 1] == '\n')
35742 config[len - 1] = '\0';
35743
35744 - if (configured == 1)
35745 + if (configured >= 1)
35746 cleanup_kgdboc();
35747
35748 /* Go and configure with the new params. */
35749 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35750 .post_exception = kgdboc_post_exp_handler,
35751 };
35752
35753 +static struct kgdb_io kgdboc_io_ops_console = {
35754 + .name = "kgdboc",
35755 + .read_char = kgdboc_get_char,
35756 + .write_char = kgdboc_put_char,
35757 + .pre_exception = kgdboc_pre_exp_handler,
35758 + .post_exception = kgdboc_post_exp_handler,
35759 + .is_console = 1
35760 +};
35761 +
35762 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35763 /* This is only available if kgdboc is a built in for early debugging */
35764 static int __init kgdboc_early_init(char *opt)
35765 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35766 index 05085be..67eadb0 100644
35767 --- a/drivers/tty/tty_io.c
35768 +++ b/drivers/tty/tty_io.c
35769 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35770
35771 void tty_default_fops(struct file_operations *fops)
35772 {
35773 - *fops = tty_fops;
35774 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35775 }
35776
35777 /*
35778 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35779 index 8e0924f..4204eb4 100644
35780 --- a/drivers/tty/tty_ldisc.c
35781 +++ b/drivers/tty/tty_ldisc.c
35782 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35783 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35784 struct tty_ldisc_ops *ldo = ld->ops;
35785
35786 - ldo->refcount--;
35787 + atomic_dec(&ldo->refcount);
35788 module_put(ldo->owner);
35789 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35790
35791 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35792 spin_lock_irqsave(&tty_ldisc_lock, flags);
35793 tty_ldiscs[disc] = new_ldisc;
35794 new_ldisc->num = disc;
35795 - new_ldisc->refcount = 0;
35796 + atomic_set(&new_ldisc->refcount, 0);
35797 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35798
35799 return ret;
35800 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35801 return -EINVAL;
35802
35803 spin_lock_irqsave(&tty_ldisc_lock, flags);
35804 - if (tty_ldiscs[disc]->refcount)
35805 + if (atomic_read(&tty_ldiscs[disc]->refcount))
35806 ret = -EBUSY;
35807 else
35808 tty_ldiscs[disc] = NULL;
35809 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35810 if (ldops) {
35811 ret = ERR_PTR(-EAGAIN);
35812 if (try_module_get(ldops->owner)) {
35813 - ldops->refcount++;
35814 + atomic_inc(&ldops->refcount);
35815 ret = ldops;
35816 }
35817 }
35818 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35819 unsigned long flags;
35820
35821 spin_lock_irqsave(&tty_ldisc_lock, flags);
35822 - ldops->refcount--;
35823 + atomic_dec(&ldops->refcount);
35824 module_put(ldops->owner);
35825 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35826 }
35827 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35828 index a605549..6bd3c96 100644
35829 --- a/drivers/tty/vt/keyboard.c
35830 +++ b/drivers/tty/vt/keyboard.c
35831 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35832 kbd->kbdmode == VC_OFF) &&
35833 value != KVAL(K_SAK))
35834 return; /* SAK is allowed even in raw mode */
35835 +
35836 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35837 + {
35838 + void *func = fn_handler[value];
35839 + if (func == fn_show_state || func == fn_show_ptregs ||
35840 + func == fn_show_mem)
35841 + return;
35842 + }
35843 +#endif
35844 +
35845 fn_handler[value](vc);
35846 }
35847
35848 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35849 index 5e096f4..0da1363 100644
35850 --- a/drivers/tty/vt/vt_ioctl.c
35851 +++ b/drivers/tty/vt/vt_ioctl.c
35852 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35853 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35854 return -EFAULT;
35855
35856 - if (!capable(CAP_SYS_TTY_CONFIG))
35857 - perm = 0;
35858 -
35859 switch (cmd) {
35860 case KDGKBENT:
35861 key_map = key_maps[s];
35862 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35863 val = (i ? K_HOLE : K_NOSUCHMAP);
35864 return put_user(val, &user_kbe->kb_value);
35865 case KDSKBENT:
35866 + if (!capable(CAP_SYS_TTY_CONFIG))
35867 + perm = 0;
35868 +
35869 if (!perm)
35870 return -EPERM;
35871 if (!i && v == K_NOSUCHMAP) {
35872 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35873 int i, j, k;
35874 int ret;
35875
35876 - if (!capable(CAP_SYS_TTY_CONFIG))
35877 - perm = 0;
35878 -
35879 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35880 if (!kbs) {
35881 ret = -ENOMEM;
35882 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35883 kfree(kbs);
35884 return ((p && *p) ? -EOVERFLOW : 0);
35885 case KDSKBSENT:
35886 + if (!capable(CAP_SYS_TTY_CONFIG))
35887 + perm = 0;
35888 +
35889 if (!perm) {
35890 ret = -EPERM;
35891 goto reterr;
35892 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35893 index a783d53..cb30d94 100644
35894 --- a/drivers/uio/uio.c
35895 +++ b/drivers/uio/uio.c
35896 @@ -25,6 +25,7 @@
35897 #include <linux/kobject.h>
35898 #include <linux/cdev.h>
35899 #include <linux/uio_driver.h>
35900 +#include <asm/local.h>
35901
35902 #define UIO_MAX_DEVICES (1U << MINORBITS)
35903
35904 @@ -32,10 +33,10 @@ struct uio_device {
35905 struct module *owner;
35906 struct device *dev;
35907 int minor;
35908 - atomic_t event;
35909 + atomic_unchecked_t event;
35910 struct fasync_struct *async_queue;
35911 wait_queue_head_t wait;
35912 - int vma_count;
35913 + local_t vma_count;
35914 struct uio_info *info;
35915 struct kobject *map_dir;
35916 struct kobject *portio_dir;
35917 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
35918 struct device_attribute *attr, char *buf)
35919 {
35920 struct uio_device *idev = dev_get_drvdata(dev);
35921 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35922 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35923 }
35924
35925 static struct device_attribute uio_class_attributes[] = {
35926 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
35927 {
35928 struct uio_device *idev = info->uio_dev;
35929
35930 - atomic_inc(&idev->event);
35931 + atomic_inc_unchecked(&idev->event);
35932 wake_up_interruptible(&idev->wait);
35933 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35934 }
35935 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
35936 }
35937
35938 listener->dev = idev;
35939 - listener->event_count = atomic_read(&idev->event);
35940 + listener->event_count = atomic_read_unchecked(&idev->event);
35941 filep->private_data = listener;
35942
35943 if (idev->info->open) {
35944 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
35945 return -EIO;
35946
35947 poll_wait(filep, &idev->wait, wait);
35948 - if (listener->event_count != atomic_read(&idev->event))
35949 + if (listener->event_count != atomic_read_unchecked(&idev->event))
35950 return POLLIN | POLLRDNORM;
35951 return 0;
35952 }
35953 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
35954 do {
35955 set_current_state(TASK_INTERRUPTIBLE);
35956
35957 - event_count = atomic_read(&idev->event);
35958 + event_count = atomic_read_unchecked(&idev->event);
35959 if (event_count != listener->event_count) {
35960 if (copy_to_user(buf, &event_count, count))
35961 retval = -EFAULT;
35962 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
35963 static void uio_vma_open(struct vm_area_struct *vma)
35964 {
35965 struct uio_device *idev = vma->vm_private_data;
35966 - idev->vma_count++;
35967 + local_inc(&idev->vma_count);
35968 }
35969
35970 static void uio_vma_close(struct vm_area_struct *vma)
35971 {
35972 struct uio_device *idev = vma->vm_private_data;
35973 - idev->vma_count--;
35974 + local_dec(&idev->vma_count);
35975 }
35976
35977 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35978 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
35979 idev->owner = owner;
35980 idev->info = info;
35981 init_waitqueue_head(&idev->wait);
35982 - atomic_set(&idev->event, 0);
35983 + atomic_set_unchecked(&idev->event, 0);
35984
35985 ret = uio_get_minor(idev);
35986 if (ret)
35987 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
35988 index a845f8b..4f54072 100644
35989 --- a/drivers/usb/atm/cxacru.c
35990 +++ b/drivers/usb/atm/cxacru.c
35991 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
35992 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35993 if (ret < 2)
35994 return -EINVAL;
35995 - if (index < 0 || index > 0x7f)
35996 + if (index > 0x7f)
35997 return -EINVAL;
35998 pos += tmp;
35999
36000 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36001 index d3448ca..d2864ca 100644
36002 --- a/drivers/usb/atm/usbatm.c
36003 +++ b/drivers/usb/atm/usbatm.c
36004 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36005 if (printk_ratelimit())
36006 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36007 __func__, vpi, vci);
36008 - atomic_inc(&vcc->stats->rx_err);
36009 + atomic_inc_unchecked(&vcc->stats->rx_err);
36010 return;
36011 }
36012
36013 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36014 if (length > ATM_MAX_AAL5_PDU) {
36015 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36016 __func__, length, vcc);
36017 - atomic_inc(&vcc->stats->rx_err);
36018 + atomic_inc_unchecked(&vcc->stats->rx_err);
36019 goto out;
36020 }
36021
36022 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36023 if (sarb->len < pdu_length) {
36024 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36025 __func__, pdu_length, sarb->len, vcc);
36026 - atomic_inc(&vcc->stats->rx_err);
36027 + atomic_inc_unchecked(&vcc->stats->rx_err);
36028 goto out;
36029 }
36030
36031 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36032 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36033 __func__, vcc);
36034 - atomic_inc(&vcc->stats->rx_err);
36035 + atomic_inc_unchecked(&vcc->stats->rx_err);
36036 goto out;
36037 }
36038
36039 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36040 if (printk_ratelimit())
36041 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36042 __func__, length);
36043 - atomic_inc(&vcc->stats->rx_drop);
36044 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36045 goto out;
36046 }
36047
36048 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36049
36050 vcc->push(vcc, skb);
36051
36052 - atomic_inc(&vcc->stats->rx);
36053 + atomic_inc_unchecked(&vcc->stats->rx);
36054 out:
36055 skb_trim(sarb, 0);
36056 }
36057 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36058 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36059
36060 usbatm_pop(vcc, skb);
36061 - atomic_inc(&vcc->stats->tx);
36062 + atomic_inc_unchecked(&vcc->stats->tx);
36063
36064 skb = skb_dequeue(&instance->sndqueue);
36065 }
36066 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36067 if (!left--)
36068 return sprintf(page,
36069 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36070 - atomic_read(&atm_dev->stats.aal5.tx),
36071 - atomic_read(&atm_dev->stats.aal5.tx_err),
36072 - atomic_read(&atm_dev->stats.aal5.rx),
36073 - atomic_read(&atm_dev->stats.aal5.rx_err),
36074 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36075 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36076 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36077 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36078 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36079 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36080
36081 if (!left--) {
36082 if (instance->disconnected)
36083 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36084 index d956965..4179a77 100644
36085 --- a/drivers/usb/core/devices.c
36086 +++ b/drivers/usb/core/devices.c
36087 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36088 * time it gets called.
36089 */
36090 static struct device_connect_event {
36091 - atomic_t count;
36092 + atomic_unchecked_t count;
36093 wait_queue_head_t wait;
36094 } device_event = {
36095 .count = ATOMIC_INIT(1),
36096 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36097
36098 void usbfs_conn_disc_event(void)
36099 {
36100 - atomic_add(2, &device_event.count);
36101 + atomic_add_unchecked(2, &device_event.count);
36102 wake_up(&device_event.wait);
36103 }
36104
36105 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36106
36107 poll_wait(file, &device_event.wait, wait);
36108
36109 - event_count = atomic_read(&device_event.count);
36110 + event_count = atomic_read_unchecked(&device_event.count);
36111 if (file->f_version != event_count) {
36112 file->f_version = event_count;
36113 return POLLIN | POLLRDNORM;
36114 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36115 index b3bdfed..a9460e0 100644
36116 --- a/drivers/usb/core/message.c
36117 +++ b/drivers/usb/core/message.c
36118 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36119 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36120 if (buf) {
36121 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36122 - if (len > 0) {
36123 - smallbuf = kmalloc(++len, GFP_NOIO);
36124 + if (len++ > 0) {
36125 + smallbuf = kmalloc(len, GFP_NOIO);
36126 if (!smallbuf)
36127 return buf;
36128 memcpy(smallbuf, buf, len);
36129 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36130 index 1fc8f12..20647c1 100644
36131 --- a/drivers/usb/early/ehci-dbgp.c
36132 +++ b/drivers/usb/early/ehci-dbgp.c
36133 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36134
36135 #ifdef CONFIG_KGDB
36136 static struct kgdb_io kgdbdbgp_io_ops;
36137 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36138 +static struct kgdb_io kgdbdbgp_io_ops_console;
36139 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36140 #else
36141 #define dbgp_kgdb_mode (0)
36142 #endif
36143 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36144 .write_char = kgdbdbgp_write_char,
36145 };
36146
36147 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36148 + .name = "kgdbdbgp",
36149 + .read_char = kgdbdbgp_read_char,
36150 + .write_char = kgdbdbgp_write_char,
36151 + .is_console = 1
36152 +};
36153 +
36154 static int kgdbdbgp_wait_time;
36155
36156 static int __init kgdbdbgp_parse_config(char *str)
36157 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36158 ptr++;
36159 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36160 }
36161 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36162 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36163 + if (early_dbgp_console.index != -1)
36164 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36165 + else
36166 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36167
36168 return 0;
36169 }
36170 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36171 index d6bea3e..60b250e 100644
36172 --- a/drivers/usb/wusbcore/wa-hc.h
36173 +++ b/drivers/usb/wusbcore/wa-hc.h
36174 @@ -192,7 +192,7 @@ struct wahc {
36175 struct list_head xfer_delayed_list;
36176 spinlock_t xfer_list_lock;
36177 struct work_struct xfer_work;
36178 - atomic_t xfer_id_count;
36179 + atomic_unchecked_t xfer_id_count;
36180 };
36181
36182
36183 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36184 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36185 spin_lock_init(&wa->xfer_list_lock);
36186 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36187 - atomic_set(&wa->xfer_id_count, 1);
36188 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36189 }
36190
36191 /**
36192 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36193 index 57c01ab..8a05959 100644
36194 --- a/drivers/usb/wusbcore/wa-xfer.c
36195 +++ b/drivers/usb/wusbcore/wa-xfer.c
36196 @@ -296,7 +296,7 @@ out:
36197 */
36198 static void wa_xfer_id_init(struct wa_xfer *xfer)
36199 {
36200 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36201 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36202 }
36203
36204 /*
36205 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36206 index c14c42b..f955cc2 100644
36207 --- a/drivers/vhost/vhost.c
36208 +++ b/drivers/vhost/vhost.c
36209 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36210 return 0;
36211 }
36212
36213 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36214 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36215 {
36216 struct file *eventfp, *filep = NULL,
36217 *pollstart = NULL, *pollstop = NULL;
36218 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36219 index b0b2ac3..89a4399 100644
36220 --- a/drivers/video/aty/aty128fb.c
36221 +++ b/drivers/video/aty/aty128fb.c
36222 @@ -148,7 +148,7 @@ enum {
36223 };
36224
36225 /* Must match above enum */
36226 -static const char *r128_family[] __devinitdata = {
36227 +static const char *r128_family[] __devinitconst = {
36228 "AGP",
36229 "PCI",
36230 "PRO AGP",
36231 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36232 index 5c3960d..15cf8fc 100644
36233 --- a/drivers/video/fbcmap.c
36234 +++ b/drivers/video/fbcmap.c
36235 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36236 rc = -ENODEV;
36237 goto out;
36238 }
36239 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36240 - !info->fbops->fb_setcmap)) {
36241 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36242 rc = -EINVAL;
36243 goto out1;
36244 }
36245 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36246 index ad93629..e020fc3 100644
36247 --- a/drivers/video/fbmem.c
36248 +++ b/drivers/video/fbmem.c
36249 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36250 image->dx += image->width + 8;
36251 }
36252 } else if (rotate == FB_ROTATE_UD) {
36253 - for (x = 0; x < num && image->dx >= 0; x++) {
36254 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36255 info->fbops->fb_imageblit(info, image);
36256 image->dx -= image->width + 8;
36257 }
36258 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36259 image->dy += image->height + 8;
36260 }
36261 } else if (rotate == FB_ROTATE_CCW) {
36262 - for (x = 0; x < num && image->dy >= 0; x++) {
36263 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36264 info->fbops->fb_imageblit(info, image);
36265 image->dy -= image->height + 8;
36266 }
36267 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36268 return -EFAULT;
36269 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36270 return -EINVAL;
36271 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36272 + if (con2fb.framebuffer >= FB_MAX)
36273 return -EINVAL;
36274 if (!registered_fb[con2fb.framebuffer])
36275 request_module("fb%d", con2fb.framebuffer);
36276 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36277 index 5a5d092..265c5ed 100644
36278 --- a/drivers/video/geode/gx1fb_core.c
36279 +++ b/drivers/video/geode/gx1fb_core.c
36280 @@ -29,7 +29,7 @@ static int crt_option = 1;
36281 static char panel_option[32] = "";
36282
36283 /* Modes relevant to the GX1 (taken from modedb.c) */
36284 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36285 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36286 /* 640x480-60 VESA */
36287 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36288 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36289 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36290 index 0fad23f..0e9afa4 100644
36291 --- a/drivers/video/gxt4500.c
36292 +++ b/drivers/video/gxt4500.c
36293 @@ -156,7 +156,7 @@ struct gxt4500_par {
36294 static char *mode_option;
36295
36296 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36297 -static const struct fb_videomode defaultmode __devinitdata = {
36298 +static const struct fb_videomode defaultmode __devinitconst = {
36299 .refresh = 60,
36300 .xres = 1280,
36301 .yres = 1024,
36302 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36303 return 0;
36304 }
36305
36306 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36307 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36308 .id = "IBM GXT4500P",
36309 .type = FB_TYPE_PACKED_PIXELS,
36310 .visual = FB_VISUAL_PSEUDOCOLOR,
36311 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36312 index 7672d2e..b56437f 100644
36313 --- a/drivers/video/i810/i810_accel.c
36314 +++ b/drivers/video/i810/i810_accel.c
36315 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36316 }
36317 }
36318 printk("ringbuffer lockup!!!\n");
36319 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36320 i810_report_error(mmio);
36321 par->dev_flags |= LOCKUP;
36322 info->pixmap.scan_align = 1;
36323 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36324 index 318f6fb..9a389c1 100644
36325 --- a/drivers/video/i810/i810_main.c
36326 +++ b/drivers/video/i810/i810_main.c
36327 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36328 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36329
36330 /* PCI */
36331 -static const char *i810_pci_list[] __devinitdata = {
36332 +static const char *i810_pci_list[] __devinitconst = {
36333 "Intel(R) 810 Framebuffer Device" ,
36334 "Intel(R) 810-DC100 Framebuffer Device" ,
36335 "Intel(R) 810E Framebuffer Device" ,
36336 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36337 index de36693..3c63fc2 100644
36338 --- a/drivers/video/jz4740_fb.c
36339 +++ b/drivers/video/jz4740_fb.c
36340 @@ -136,7 +136,7 @@ struct jzfb {
36341 uint32_t pseudo_palette[16];
36342 };
36343
36344 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36345 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36346 .id = "JZ4740 FB",
36347 .type = FB_TYPE_PACKED_PIXELS,
36348 .visual = FB_VISUAL_TRUECOLOR,
36349 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36350 index 3c14e43..eafa544 100644
36351 --- a/drivers/video/logo/logo_linux_clut224.ppm
36352 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36353 @@ -1,1604 +1,1123 @@
36354 P3
36355 -# Standard 224-color Linux logo
36356 80 80
36357 255
36358 - 0 0 0 0 0 0 0 0 0 0 0 0
36359 - 0 0 0 0 0 0 0 0 0 0 0 0
36360 - 0 0 0 0 0 0 0 0 0 0 0 0
36361 - 0 0 0 0 0 0 0 0 0 0 0 0
36362 - 0 0 0 0 0 0 0 0 0 0 0 0
36363 - 0 0 0 0 0 0 0 0 0 0 0 0
36364 - 0 0 0 0 0 0 0 0 0 0 0 0
36365 - 0 0 0 0 0 0 0 0 0 0 0 0
36366 - 0 0 0 0 0 0 0 0 0 0 0 0
36367 - 6 6 6 6 6 6 10 10 10 10 10 10
36368 - 10 10 10 6 6 6 6 6 6 6 6 6
36369 - 0 0 0 0 0 0 0 0 0 0 0 0
36370 - 0 0 0 0 0 0 0 0 0 0 0 0
36371 - 0 0 0 0 0 0 0 0 0 0 0 0
36372 - 0 0 0 0 0 0 0 0 0 0 0 0
36373 - 0 0 0 0 0 0 0 0 0 0 0 0
36374 - 0 0 0 0 0 0 0 0 0 0 0 0
36375 - 0 0 0 0 0 0 0 0 0 0 0 0
36376 - 0 0 0 0 0 0 0 0 0 0 0 0
36377 - 0 0 0 0 0 0 0 0 0 0 0 0
36378 - 0 0 0 0 0 0 0 0 0 0 0 0
36379 - 0 0 0 0 0 0 0 0 0 0 0 0
36380 - 0 0 0 0 0 0 0 0 0 0 0 0
36381 - 0 0 0 0 0 0 0 0 0 0 0 0
36382 - 0 0 0 0 0 0 0 0 0 0 0 0
36383 - 0 0 0 0 0 0 0 0 0 0 0 0
36384 - 0 0 0 0 0 0 0 0 0 0 0 0
36385 - 0 0 0 0 0 0 0 0 0 0 0 0
36386 - 0 0 0 6 6 6 10 10 10 14 14 14
36387 - 22 22 22 26 26 26 30 30 30 34 34 34
36388 - 30 30 30 30 30 30 26 26 26 18 18 18
36389 - 14 14 14 10 10 10 6 6 6 0 0 0
36390 - 0 0 0 0 0 0 0 0 0 0 0 0
36391 - 0 0 0 0 0 0 0 0 0 0 0 0
36392 - 0 0 0 0 0 0 0 0 0 0 0 0
36393 - 0 0 0 0 0 0 0 0 0 0 0 0
36394 - 0 0 0 0 0 0 0 0 0 0 0 0
36395 - 0 0 0 0 0 0 0 0 0 0 0 0
36396 - 0 0 0 0 0 0 0 0 0 0 0 0
36397 - 0 0 0 0 0 0 0 0 0 0 0 0
36398 - 0 0 0 0 0 0 0 0 0 0 0 0
36399 - 0 0 0 0 0 1 0 0 1 0 0 0
36400 - 0 0 0 0 0 0 0 0 0 0 0 0
36401 - 0 0 0 0 0 0 0 0 0 0 0 0
36402 - 0 0 0 0 0 0 0 0 0 0 0 0
36403 - 0 0 0 0 0 0 0 0 0 0 0 0
36404 - 0 0 0 0 0 0 0 0 0 0 0 0
36405 - 0 0 0 0 0 0 0 0 0 0 0 0
36406 - 6 6 6 14 14 14 26 26 26 42 42 42
36407 - 54 54 54 66 66 66 78 78 78 78 78 78
36408 - 78 78 78 74 74 74 66 66 66 54 54 54
36409 - 42 42 42 26 26 26 18 18 18 10 10 10
36410 - 6 6 6 0 0 0 0 0 0 0 0 0
36411 - 0 0 0 0 0 0 0 0 0 0 0 0
36412 - 0 0 0 0 0 0 0 0 0 0 0 0
36413 - 0 0 0 0 0 0 0 0 0 0 0 0
36414 - 0 0 0 0 0 0 0 0 0 0 0 0
36415 - 0 0 0 0 0 0 0 0 0 0 0 0
36416 - 0 0 0 0 0 0 0 0 0 0 0 0
36417 - 0 0 0 0 0 0 0 0 0 0 0 0
36418 - 0 0 0 0 0 0 0 0 0 0 0 0
36419 - 0 0 1 0 0 0 0 0 0 0 0 0
36420 - 0 0 0 0 0 0 0 0 0 0 0 0
36421 - 0 0 0 0 0 0 0 0 0 0 0 0
36422 - 0 0 0 0 0 0 0 0 0 0 0 0
36423 - 0 0 0 0 0 0 0 0 0 0 0 0
36424 - 0 0 0 0 0 0 0 0 0 0 0 0
36425 - 0 0 0 0 0 0 0 0 0 10 10 10
36426 - 22 22 22 42 42 42 66 66 66 86 86 86
36427 - 66 66 66 38 38 38 38 38 38 22 22 22
36428 - 26 26 26 34 34 34 54 54 54 66 66 66
36429 - 86 86 86 70 70 70 46 46 46 26 26 26
36430 - 14 14 14 6 6 6 0 0 0 0 0 0
36431 - 0 0 0 0 0 0 0 0 0 0 0 0
36432 - 0 0 0 0 0 0 0 0 0 0 0 0
36433 - 0 0 0 0 0 0 0 0 0 0 0 0
36434 - 0 0 0 0 0 0 0 0 0 0 0 0
36435 - 0 0 0 0 0 0 0 0 0 0 0 0
36436 - 0 0 0 0 0 0 0 0 0 0 0 0
36437 - 0 0 0 0 0 0 0 0 0 0 0 0
36438 - 0 0 0 0 0 0 0 0 0 0 0 0
36439 - 0 0 1 0 0 1 0 0 1 0 0 0
36440 - 0 0 0 0 0 0 0 0 0 0 0 0
36441 - 0 0 0 0 0 0 0 0 0 0 0 0
36442 - 0 0 0 0 0 0 0 0 0 0 0 0
36443 - 0 0 0 0 0 0 0 0 0 0 0 0
36444 - 0 0 0 0 0 0 0 0 0 0 0 0
36445 - 0 0 0 0 0 0 10 10 10 26 26 26
36446 - 50 50 50 82 82 82 58 58 58 6 6 6
36447 - 2 2 6 2 2 6 2 2 6 2 2 6
36448 - 2 2 6 2 2 6 2 2 6 2 2 6
36449 - 6 6 6 54 54 54 86 86 86 66 66 66
36450 - 38 38 38 18 18 18 6 6 6 0 0 0
36451 - 0 0 0 0 0 0 0 0 0 0 0 0
36452 - 0 0 0 0 0 0 0 0 0 0 0 0
36453 - 0 0 0 0 0 0 0 0 0 0 0 0
36454 - 0 0 0 0 0 0 0 0 0 0 0 0
36455 - 0 0 0 0 0 0 0 0 0 0 0 0
36456 - 0 0 0 0 0 0 0 0 0 0 0 0
36457 - 0 0 0 0 0 0 0 0 0 0 0 0
36458 - 0 0 0 0 0 0 0 0 0 0 0 0
36459 - 0 0 0 0 0 0 0 0 0 0 0 0
36460 - 0 0 0 0 0 0 0 0 0 0 0 0
36461 - 0 0 0 0 0 0 0 0 0 0 0 0
36462 - 0 0 0 0 0 0 0 0 0 0 0 0
36463 - 0 0 0 0 0 0 0 0 0 0 0 0
36464 - 0 0 0 0 0 0 0 0 0 0 0 0
36465 - 0 0 0 6 6 6 22 22 22 50 50 50
36466 - 78 78 78 34 34 34 2 2 6 2 2 6
36467 - 2 2 6 2 2 6 2 2 6 2 2 6
36468 - 2 2 6 2 2 6 2 2 6 2 2 6
36469 - 2 2 6 2 2 6 6 6 6 70 70 70
36470 - 78 78 78 46 46 46 22 22 22 6 6 6
36471 - 0 0 0 0 0 0 0 0 0 0 0 0
36472 - 0 0 0 0 0 0 0 0 0 0 0 0
36473 - 0 0 0 0 0 0 0 0 0 0 0 0
36474 - 0 0 0 0 0 0 0 0 0 0 0 0
36475 - 0 0 0 0 0 0 0 0 0 0 0 0
36476 - 0 0 0 0 0 0 0 0 0 0 0 0
36477 - 0 0 0 0 0 0 0 0 0 0 0 0
36478 - 0 0 0 0 0 0 0 0 0 0 0 0
36479 - 0 0 1 0 0 1 0 0 1 0 0 0
36480 - 0 0 0 0 0 0 0 0 0 0 0 0
36481 - 0 0 0 0 0 0 0 0 0 0 0 0
36482 - 0 0 0 0 0 0 0 0 0 0 0 0
36483 - 0 0 0 0 0 0 0 0 0 0 0 0
36484 - 0 0 0 0 0 0 0 0 0 0 0 0
36485 - 6 6 6 18 18 18 42 42 42 82 82 82
36486 - 26 26 26 2 2 6 2 2 6 2 2 6
36487 - 2 2 6 2 2 6 2 2 6 2 2 6
36488 - 2 2 6 2 2 6 2 2 6 14 14 14
36489 - 46 46 46 34 34 34 6 6 6 2 2 6
36490 - 42 42 42 78 78 78 42 42 42 18 18 18
36491 - 6 6 6 0 0 0 0 0 0 0 0 0
36492 - 0 0 0 0 0 0 0 0 0 0 0 0
36493 - 0 0 0 0 0 0 0 0 0 0 0 0
36494 - 0 0 0 0 0 0 0 0 0 0 0 0
36495 - 0 0 0 0 0 0 0 0 0 0 0 0
36496 - 0 0 0 0 0 0 0 0 0 0 0 0
36497 - 0 0 0 0 0 0 0 0 0 0 0 0
36498 - 0 0 0 0 0 0 0 0 0 0 0 0
36499 - 0 0 1 0 0 0 0 0 1 0 0 0
36500 - 0 0 0 0 0 0 0 0 0 0 0 0
36501 - 0 0 0 0 0 0 0 0 0 0 0 0
36502 - 0 0 0 0 0 0 0 0 0 0 0 0
36503 - 0 0 0 0 0 0 0 0 0 0 0 0
36504 - 0 0 0 0 0 0 0 0 0 0 0 0
36505 - 10 10 10 30 30 30 66 66 66 58 58 58
36506 - 2 2 6 2 2 6 2 2 6 2 2 6
36507 - 2 2 6 2 2 6 2 2 6 2 2 6
36508 - 2 2 6 2 2 6 2 2 6 26 26 26
36509 - 86 86 86 101 101 101 46 46 46 10 10 10
36510 - 2 2 6 58 58 58 70 70 70 34 34 34
36511 - 10 10 10 0 0 0 0 0 0 0 0 0
36512 - 0 0 0 0 0 0 0 0 0 0 0 0
36513 - 0 0 0 0 0 0 0 0 0 0 0 0
36514 - 0 0 0 0 0 0 0 0 0 0 0 0
36515 - 0 0 0 0 0 0 0 0 0 0 0 0
36516 - 0 0 0 0 0 0 0 0 0 0 0 0
36517 - 0 0 0 0 0 0 0 0 0 0 0 0
36518 - 0 0 0 0 0 0 0 0 0 0 0 0
36519 - 0 0 1 0 0 1 0 0 1 0 0 0
36520 - 0 0 0 0 0 0 0 0 0 0 0 0
36521 - 0 0 0 0 0 0 0 0 0 0 0 0
36522 - 0 0 0 0 0 0 0 0 0 0 0 0
36523 - 0 0 0 0 0 0 0 0 0 0 0 0
36524 - 0 0 0 0 0 0 0 0 0 0 0 0
36525 - 14 14 14 42 42 42 86 86 86 10 10 10
36526 - 2 2 6 2 2 6 2 2 6 2 2 6
36527 - 2 2 6 2 2 6 2 2 6 2 2 6
36528 - 2 2 6 2 2 6 2 2 6 30 30 30
36529 - 94 94 94 94 94 94 58 58 58 26 26 26
36530 - 2 2 6 6 6 6 78 78 78 54 54 54
36531 - 22 22 22 6 6 6 0 0 0 0 0 0
36532 - 0 0 0 0 0 0 0 0 0 0 0 0
36533 - 0 0 0 0 0 0 0 0 0 0 0 0
36534 - 0 0 0 0 0 0 0 0 0 0 0 0
36535 - 0 0 0 0 0 0 0 0 0 0 0 0
36536 - 0 0 0 0 0 0 0 0 0 0 0 0
36537 - 0 0 0 0 0 0 0 0 0 0 0 0
36538 - 0 0 0 0 0 0 0 0 0 0 0 0
36539 - 0 0 0 0 0 0 0 0 0 0 0 0
36540 - 0 0 0 0 0 0 0 0 0 0 0 0
36541 - 0 0 0 0 0 0 0 0 0 0 0 0
36542 - 0 0 0 0 0 0 0 0 0 0 0 0
36543 - 0 0 0 0 0 0 0 0 0 0 0 0
36544 - 0 0 0 0 0 0 0 0 0 6 6 6
36545 - 22 22 22 62 62 62 62 62 62 2 2 6
36546 - 2 2 6 2 2 6 2 2 6 2 2 6
36547 - 2 2 6 2 2 6 2 2 6 2 2 6
36548 - 2 2 6 2 2 6 2 2 6 26 26 26
36549 - 54 54 54 38 38 38 18 18 18 10 10 10
36550 - 2 2 6 2 2 6 34 34 34 82 82 82
36551 - 38 38 38 14 14 14 0 0 0 0 0 0
36552 - 0 0 0 0 0 0 0 0 0 0 0 0
36553 - 0 0 0 0 0 0 0 0 0 0 0 0
36554 - 0 0 0 0 0 0 0 0 0 0 0 0
36555 - 0 0 0 0 0 0 0 0 0 0 0 0
36556 - 0 0 0 0 0 0 0 0 0 0 0 0
36557 - 0 0 0 0 0 0 0 0 0 0 0 0
36558 - 0 0 0 0 0 0 0 0 0 0 0 0
36559 - 0 0 0 0 0 1 0 0 1 0 0 0
36560 - 0 0 0 0 0 0 0 0 0 0 0 0
36561 - 0 0 0 0 0 0 0 0 0 0 0 0
36562 - 0 0 0 0 0 0 0 0 0 0 0 0
36563 - 0 0 0 0 0 0 0 0 0 0 0 0
36564 - 0 0 0 0 0 0 0 0 0 6 6 6
36565 - 30 30 30 78 78 78 30 30 30 2 2 6
36566 - 2 2 6 2 2 6 2 2 6 2 2 6
36567 - 2 2 6 2 2 6 2 2 6 2 2 6
36568 - 2 2 6 2 2 6 2 2 6 10 10 10
36569 - 10 10 10 2 2 6 2 2 6 2 2 6
36570 - 2 2 6 2 2 6 2 2 6 78 78 78
36571 - 50 50 50 18 18 18 6 6 6 0 0 0
36572 - 0 0 0 0 0 0 0 0 0 0 0 0
36573 - 0 0 0 0 0 0 0 0 0 0 0 0
36574 - 0 0 0 0 0 0 0 0 0 0 0 0
36575 - 0 0 0 0 0 0 0 0 0 0 0 0
36576 - 0 0 0 0 0 0 0 0 0 0 0 0
36577 - 0 0 0 0 0 0 0 0 0 0 0 0
36578 - 0 0 0 0 0 0 0 0 0 0 0 0
36579 - 0 0 1 0 0 0 0 0 0 0 0 0
36580 - 0 0 0 0 0 0 0 0 0 0 0 0
36581 - 0 0 0 0 0 0 0 0 0 0 0 0
36582 - 0 0 0 0 0 0 0 0 0 0 0 0
36583 - 0 0 0 0 0 0 0 0 0 0 0 0
36584 - 0 0 0 0 0 0 0 0 0 10 10 10
36585 - 38 38 38 86 86 86 14 14 14 2 2 6
36586 - 2 2 6 2 2 6 2 2 6 2 2 6
36587 - 2 2 6 2 2 6 2 2 6 2 2 6
36588 - 2 2 6 2 2 6 2 2 6 2 2 6
36589 - 2 2 6 2 2 6 2 2 6 2 2 6
36590 - 2 2 6 2 2 6 2 2 6 54 54 54
36591 - 66 66 66 26 26 26 6 6 6 0 0 0
36592 - 0 0 0 0 0 0 0 0 0 0 0 0
36593 - 0 0 0 0 0 0 0 0 0 0 0 0
36594 - 0 0 0 0 0 0 0 0 0 0 0 0
36595 - 0 0 0 0 0 0 0 0 0 0 0 0
36596 - 0 0 0 0 0 0 0 0 0 0 0 0
36597 - 0 0 0 0 0 0 0 0 0 0 0 0
36598 - 0 0 0 0 0 0 0 0 0 0 0 0
36599 - 0 0 0 0 0 1 0 0 1 0 0 0
36600 - 0 0 0 0 0 0 0 0 0 0 0 0
36601 - 0 0 0 0 0 0 0 0 0 0 0 0
36602 - 0 0 0 0 0 0 0 0 0 0 0 0
36603 - 0 0 0 0 0 0 0 0 0 0 0 0
36604 - 0 0 0 0 0 0 0 0 0 14 14 14
36605 - 42 42 42 82 82 82 2 2 6 2 2 6
36606 - 2 2 6 6 6 6 10 10 10 2 2 6
36607 - 2 2 6 2 2 6 2 2 6 2 2 6
36608 - 2 2 6 2 2 6 2 2 6 6 6 6
36609 - 14 14 14 10 10 10 2 2 6 2 2 6
36610 - 2 2 6 2 2 6 2 2 6 18 18 18
36611 - 82 82 82 34 34 34 10 10 10 0 0 0
36612 - 0 0 0 0 0 0 0 0 0 0 0 0
36613 - 0 0 0 0 0 0 0 0 0 0 0 0
36614 - 0 0 0 0 0 0 0 0 0 0 0 0
36615 - 0 0 0 0 0 0 0 0 0 0 0 0
36616 - 0 0 0 0 0 0 0 0 0 0 0 0
36617 - 0 0 0 0 0 0 0 0 0 0 0 0
36618 - 0 0 0 0 0 0 0 0 0 0 0 0
36619 - 0 0 1 0 0 0 0 0 0 0 0 0
36620 - 0 0 0 0 0 0 0 0 0 0 0 0
36621 - 0 0 0 0 0 0 0 0 0 0 0 0
36622 - 0 0 0 0 0 0 0 0 0 0 0 0
36623 - 0 0 0 0 0 0 0 0 0 0 0 0
36624 - 0 0 0 0 0 0 0 0 0 14 14 14
36625 - 46 46 46 86 86 86 2 2 6 2 2 6
36626 - 6 6 6 6 6 6 22 22 22 34 34 34
36627 - 6 6 6 2 2 6 2 2 6 2 2 6
36628 - 2 2 6 2 2 6 18 18 18 34 34 34
36629 - 10 10 10 50 50 50 22 22 22 2 2 6
36630 - 2 2 6 2 2 6 2 2 6 10 10 10
36631 - 86 86 86 42 42 42 14 14 14 0 0 0
36632 - 0 0 0 0 0 0 0 0 0 0 0 0
36633 - 0 0 0 0 0 0 0 0 0 0 0 0
36634 - 0 0 0 0 0 0 0 0 0 0 0 0
36635 - 0 0 0 0 0 0 0 0 0 0 0 0
36636 - 0 0 0 0 0 0 0 0 0 0 0 0
36637 - 0 0 0 0 0 0 0 0 0 0 0 0
36638 - 0 0 0 0 0 0 0 0 0 0 0 0
36639 - 0 0 1 0 0 1 0 0 1 0 0 0
36640 - 0 0 0 0 0 0 0 0 0 0 0 0
36641 - 0 0 0 0 0 0 0 0 0 0 0 0
36642 - 0 0 0 0 0 0 0 0 0 0 0 0
36643 - 0 0 0 0 0 0 0 0 0 0 0 0
36644 - 0 0 0 0 0 0 0 0 0 14 14 14
36645 - 46 46 46 86 86 86 2 2 6 2 2 6
36646 - 38 38 38 116 116 116 94 94 94 22 22 22
36647 - 22 22 22 2 2 6 2 2 6 2 2 6
36648 - 14 14 14 86 86 86 138 138 138 162 162 162
36649 -154 154 154 38 38 38 26 26 26 6 6 6
36650 - 2 2 6 2 2 6 2 2 6 2 2 6
36651 - 86 86 86 46 46 46 14 14 14 0 0 0
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 0 0 0 0 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 0 0 0
36656 - 0 0 0 0 0 0 0 0 0 0 0 0
36657 - 0 0 0 0 0 0 0 0 0 0 0 0
36658 - 0 0 0 0 0 0 0 0 0 0 0 0
36659 - 0 0 0 0 0 0 0 0 0 0 0 0
36660 - 0 0 0 0 0 0 0 0 0 0 0 0
36661 - 0 0 0 0 0 0 0 0 0 0 0 0
36662 - 0 0 0 0 0 0 0 0 0 0 0 0
36663 - 0 0 0 0 0 0 0 0 0 0 0 0
36664 - 0 0 0 0 0 0 0 0 0 14 14 14
36665 - 46 46 46 86 86 86 2 2 6 14 14 14
36666 -134 134 134 198 198 198 195 195 195 116 116 116
36667 - 10 10 10 2 2 6 2 2 6 6 6 6
36668 -101 98 89 187 187 187 210 210 210 218 218 218
36669 -214 214 214 134 134 134 14 14 14 6 6 6
36670 - 2 2 6 2 2 6 2 2 6 2 2 6
36671 - 86 86 86 50 50 50 18 18 18 6 6 6
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 0 0 0 0 0 0 0 0 0
36677 - 0 0 0 0 0 0 0 0 0 0 0 0
36678 - 0 0 0 0 0 0 0 0 1 0 0 0
36679 - 0 0 1 0 0 1 0 0 1 0 0 0
36680 - 0 0 0 0 0 0 0 0 0 0 0 0
36681 - 0 0 0 0 0 0 0 0 0 0 0 0
36682 - 0 0 0 0 0 0 0 0 0 0 0 0
36683 - 0 0 0 0 0 0 0 0 0 0 0 0
36684 - 0 0 0 0 0 0 0 0 0 14 14 14
36685 - 46 46 46 86 86 86 2 2 6 54 54 54
36686 -218 218 218 195 195 195 226 226 226 246 246 246
36687 - 58 58 58 2 2 6 2 2 6 30 30 30
36688 -210 210 210 253 253 253 174 174 174 123 123 123
36689 -221 221 221 234 234 234 74 74 74 2 2 6
36690 - 2 2 6 2 2 6 2 2 6 2 2 6
36691 - 70 70 70 58 58 58 22 22 22 6 6 6
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 0 0 0 0 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 0 0 0 0 0 0 0 0 0 0 0 0
36697 - 0 0 0 0 0 0 0 0 0 0 0 0
36698 - 0 0 0 0 0 0 0 0 0 0 0 0
36699 - 0 0 0 0 0 0 0 0 0 0 0 0
36700 - 0 0 0 0 0 0 0 0 0 0 0 0
36701 - 0 0 0 0 0 0 0 0 0 0 0 0
36702 - 0 0 0 0 0 0 0 0 0 0 0 0
36703 - 0 0 0 0 0 0 0 0 0 0 0 0
36704 - 0 0 0 0 0 0 0 0 0 14 14 14
36705 - 46 46 46 82 82 82 2 2 6 106 106 106
36706 -170 170 170 26 26 26 86 86 86 226 226 226
36707 -123 123 123 10 10 10 14 14 14 46 46 46
36708 -231 231 231 190 190 190 6 6 6 70 70 70
36709 - 90 90 90 238 238 238 158 158 158 2 2 6
36710 - 2 2 6 2 2 6 2 2 6 2 2 6
36711 - 70 70 70 58 58 58 22 22 22 6 6 6
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 0 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 0 0 0 0 0 0 0 0 0 0 0 0
36717 - 0 0 0 0 0 0 0 0 0 0 0 0
36718 - 0 0 0 0 0 0 0 0 1 0 0 0
36719 - 0 0 1 0 0 1 0 0 1 0 0 0
36720 - 0 0 0 0 0 0 0 0 0 0 0 0
36721 - 0 0 0 0 0 0 0 0 0 0 0 0
36722 - 0 0 0 0 0 0 0 0 0 0 0 0
36723 - 0 0 0 0 0 0 0 0 0 0 0 0
36724 - 0 0 0 0 0 0 0 0 0 14 14 14
36725 - 42 42 42 86 86 86 6 6 6 116 116 116
36726 -106 106 106 6 6 6 70 70 70 149 149 149
36727 -128 128 128 18 18 18 38 38 38 54 54 54
36728 -221 221 221 106 106 106 2 2 6 14 14 14
36729 - 46 46 46 190 190 190 198 198 198 2 2 6
36730 - 2 2 6 2 2 6 2 2 6 2 2 6
36731 - 74 74 74 62 62 62 22 22 22 6 6 6
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 0 0 0 0 0 0 0 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 0 0 0 0 0 0 0 0 0 0 0 0
36737 - 0 0 0 0 0 0 0 0 0 0 0 0
36738 - 0 0 0 0 0 0 0 0 1 0 0 0
36739 - 0 0 1 0 0 0 0 0 1 0 0 0
36740 - 0 0 0 0 0 0 0 0 0 0 0 0
36741 - 0 0 0 0 0 0 0 0 0 0 0 0
36742 - 0 0 0 0 0 0 0 0 0 0 0 0
36743 - 0 0 0 0 0 0 0 0 0 0 0 0
36744 - 0 0 0 0 0 0 0 0 0 14 14 14
36745 - 42 42 42 94 94 94 14 14 14 101 101 101
36746 -128 128 128 2 2 6 18 18 18 116 116 116
36747 -118 98 46 121 92 8 121 92 8 98 78 10
36748 -162 162 162 106 106 106 2 2 6 2 2 6
36749 - 2 2 6 195 195 195 195 195 195 6 6 6
36750 - 2 2 6 2 2 6 2 2 6 2 2 6
36751 - 74 74 74 62 62 62 22 22 22 6 6 6
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 0 0 0
36756 - 0 0 0 0 0 0 0 0 0 0 0 0
36757 - 0 0 0 0 0 0 0 0 0 0 0 0
36758 - 0 0 0 0 0 0 0 0 1 0 0 1
36759 - 0 0 1 0 0 0 0 0 1 0 0 0
36760 - 0 0 0 0 0 0 0 0 0 0 0 0
36761 - 0 0 0 0 0 0 0 0 0 0 0 0
36762 - 0 0 0 0 0 0 0 0 0 0 0 0
36763 - 0 0 0 0 0 0 0 0 0 0 0 0
36764 - 0 0 0 0 0 0 0 0 0 10 10 10
36765 - 38 38 38 90 90 90 14 14 14 58 58 58
36766 -210 210 210 26 26 26 54 38 6 154 114 10
36767 -226 170 11 236 186 11 225 175 15 184 144 12
36768 -215 174 15 175 146 61 37 26 9 2 2 6
36769 - 70 70 70 246 246 246 138 138 138 2 2 6
36770 - 2 2 6 2 2 6 2 2 6 2 2 6
36771 - 70 70 70 66 66 66 26 26 26 6 6 6
36772 - 0 0 0 0 0 0 0 0 0 0 0 0
36773 - 0 0 0 0 0 0 0 0 0 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 0 0 0
36776 - 0 0 0 0 0 0 0 0 0 0 0 0
36777 - 0 0 0 0 0 0 0 0 0 0 0 0
36778 - 0 0 0 0 0 0 0 0 0 0 0 0
36779 - 0 0 0 0 0 0 0 0 0 0 0 0
36780 - 0 0 0 0 0 0 0 0 0 0 0 0
36781 - 0 0 0 0 0 0 0 0 0 0 0 0
36782 - 0 0 0 0 0 0 0 0 0 0 0 0
36783 - 0 0 0 0 0 0 0 0 0 0 0 0
36784 - 0 0 0 0 0 0 0 0 0 10 10 10
36785 - 38 38 38 86 86 86 14 14 14 10 10 10
36786 -195 195 195 188 164 115 192 133 9 225 175 15
36787 -239 182 13 234 190 10 232 195 16 232 200 30
36788 -245 207 45 241 208 19 232 195 16 184 144 12
36789 -218 194 134 211 206 186 42 42 42 2 2 6
36790 - 2 2 6 2 2 6 2 2 6 2 2 6
36791 - 50 50 50 74 74 74 30 30 30 6 6 6
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 0 0 0 0 0 0 0 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 0 0 0
36796 - 0 0 0 0 0 0 0 0 0 0 0 0
36797 - 0 0 0 0 0 0 0 0 0 0 0 0
36798 - 0 0 0 0 0 0 0 0 0 0 0 0
36799 - 0 0 0 0 0 0 0 0 0 0 0 0
36800 - 0 0 0 0 0 0 0 0 0 0 0 0
36801 - 0 0 0 0 0 0 0 0 0 0 0 0
36802 - 0 0 0 0 0 0 0 0 0 0 0 0
36803 - 0 0 0 0 0 0 0 0 0 0 0 0
36804 - 0 0 0 0 0 0 0 0 0 10 10 10
36805 - 34 34 34 86 86 86 14 14 14 2 2 6
36806 -121 87 25 192 133 9 219 162 10 239 182 13
36807 -236 186 11 232 195 16 241 208 19 244 214 54
36808 -246 218 60 246 218 38 246 215 20 241 208 19
36809 -241 208 19 226 184 13 121 87 25 2 2 6
36810 - 2 2 6 2 2 6 2 2 6 2 2 6
36811 - 50 50 50 82 82 82 34 34 34 10 10 10
36812 - 0 0 0 0 0 0 0 0 0 0 0 0
36813 - 0 0 0 0 0 0 0 0 0 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 0 0 0
36816 - 0 0 0 0 0 0 0 0 0 0 0 0
36817 - 0 0 0 0 0 0 0 0 0 0 0 0
36818 - 0 0 0 0 0 0 0 0 0 0 0 0
36819 - 0 0 0 0 0 0 0 0 0 0 0 0
36820 - 0 0 0 0 0 0 0 0 0 0 0 0
36821 - 0 0 0 0 0 0 0 0 0 0 0 0
36822 - 0 0 0 0 0 0 0 0 0 0 0 0
36823 - 0 0 0 0 0 0 0 0 0 0 0 0
36824 - 0 0 0 0 0 0 0 0 0 10 10 10
36825 - 34 34 34 82 82 82 30 30 30 61 42 6
36826 -180 123 7 206 145 10 230 174 11 239 182 13
36827 -234 190 10 238 202 15 241 208 19 246 218 74
36828 -246 218 38 246 215 20 246 215 20 246 215 20
36829 -226 184 13 215 174 15 184 144 12 6 6 6
36830 - 2 2 6 2 2 6 2 2 6 2 2 6
36831 - 26 26 26 94 94 94 42 42 42 14 14 14
36832 - 0 0 0 0 0 0 0 0 0 0 0 0
36833 - 0 0 0 0 0 0 0 0 0 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 0 0 0
36836 - 0 0 0 0 0 0 0 0 0 0 0 0
36837 - 0 0 0 0 0 0 0 0 0 0 0 0
36838 - 0 0 0 0 0 0 0 0 0 0 0 0
36839 - 0 0 0 0 0 0 0 0 0 0 0 0
36840 - 0 0 0 0 0 0 0 0 0 0 0 0
36841 - 0 0 0 0 0 0 0 0 0 0 0 0
36842 - 0 0 0 0 0 0 0 0 0 0 0 0
36843 - 0 0 0 0 0 0 0 0 0 0 0 0
36844 - 0 0 0 0 0 0 0 0 0 10 10 10
36845 - 30 30 30 78 78 78 50 50 50 104 69 6
36846 -192 133 9 216 158 10 236 178 12 236 186 11
36847 -232 195 16 241 208 19 244 214 54 245 215 43
36848 -246 215 20 246 215 20 241 208 19 198 155 10
36849 -200 144 11 216 158 10 156 118 10 2 2 6
36850 - 2 2 6 2 2 6 2 2 6 2 2 6
36851 - 6 6 6 90 90 90 54 54 54 18 18 18
36852 - 6 6 6 0 0 0 0 0 0 0 0 0
36853 - 0 0 0 0 0 0 0 0 0 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 0 0 0
36856 - 0 0 0 0 0 0 0 0 0 0 0 0
36857 - 0 0 0 0 0 0 0 0 0 0 0 0
36858 - 0 0 0 0 0 0 0 0 0 0 0 0
36859 - 0 0 0 0 0 0 0 0 0 0 0 0
36860 - 0 0 0 0 0 0 0 0 0 0 0 0
36861 - 0 0 0 0 0 0 0 0 0 0 0 0
36862 - 0 0 0 0 0 0 0 0 0 0 0 0
36863 - 0 0 0 0 0 0 0 0 0 0 0 0
36864 - 0 0 0 0 0 0 0 0 0 10 10 10
36865 - 30 30 30 78 78 78 46 46 46 22 22 22
36866 -137 92 6 210 162 10 239 182 13 238 190 10
36867 -238 202 15 241 208 19 246 215 20 246 215 20
36868 -241 208 19 203 166 17 185 133 11 210 150 10
36869 -216 158 10 210 150 10 102 78 10 2 2 6
36870 - 6 6 6 54 54 54 14 14 14 2 2 6
36871 - 2 2 6 62 62 62 74 74 74 30 30 30
36872 - 10 10 10 0 0 0 0 0 0 0 0 0
36873 - 0 0 0 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 0 0 0
36876 - 0 0 0 0 0 0 0 0 0 0 0 0
36877 - 0 0 0 0 0 0 0 0 0 0 0 0
36878 - 0 0 0 0 0 0 0 0 0 0 0 0
36879 - 0 0 0 0 0 0 0 0 0 0 0 0
36880 - 0 0 0 0 0 0 0 0 0 0 0 0
36881 - 0 0 0 0 0 0 0 0 0 0 0 0
36882 - 0 0 0 0 0 0 0 0 0 0 0 0
36883 - 0 0 0 0 0 0 0 0 0 0 0 0
36884 - 0 0 0 0 0 0 0 0 0 10 10 10
36885 - 34 34 34 78 78 78 50 50 50 6 6 6
36886 - 94 70 30 139 102 15 190 146 13 226 184 13
36887 -232 200 30 232 195 16 215 174 15 190 146 13
36888 -168 122 10 192 133 9 210 150 10 213 154 11
36889 -202 150 34 182 157 106 101 98 89 2 2 6
36890 - 2 2 6 78 78 78 116 116 116 58 58 58
36891 - 2 2 6 22 22 22 90 90 90 46 46 46
36892 - 18 18 18 6 6 6 0 0 0 0 0 0
36893 - 0 0 0 0 0 0 0 0 0 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 0 0 0
36896 - 0 0 0 0 0 0 0 0 0 0 0 0
36897 - 0 0 0 0 0 0 0 0 0 0 0 0
36898 - 0 0 0 0 0 0 0 0 0 0 0 0
36899 - 0 0 0 0 0 0 0 0 0 0 0 0
36900 - 0 0 0 0 0 0 0 0 0 0 0 0
36901 - 0 0 0 0 0 0 0 0 0 0 0 0
36902 - 0 0 0 0 0 0 0 0 0 0 0 0
36903 - 0 0 0 0 0 0 0 0 0 0 0 0
36904 - 0 0 0 0 0 0 0 0 0 10 10 10
36905 - 38 38 38 86 86 86 50 50 50 6 6 6
36906 -128 128 128 174 154 114 156 107 11 168 122 10
36907 -198 155 10 184 144 12 197 138 11 200 144 11
36908 -206 145 10 206 145 10 197 138 11 188 164 115
36909 -195 195 195 198 198 198 174 174 174 14 14 14
36910 - 2 2 6 22 22 22 116 116 116 116 116 116
36911 - 22 22 22 2 2 6 74 74 74 70 70 70
36912 - 30 30 30 10 10 10 0 0 0 0 0 0
36913 - 0 0 0 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 0 0 0
36916 - 0 0 0 0 0 0 0 0 0 0 0 0
36917 - 0 0 0 0 0 0 0 0 0 0 0 0
36918 - 0 0 0 0 0 0 0 0 0 0 0 0
36919 - 0 0 0 0 0 0 0 0 0 0 0 0
36920 - 0 0 0 0 0 0 0 0 0 0 0 0
36921 - 0 0 0 0 0 0 0 0 0 0 0 0
36922 - 0 0 0 0 0 0 0 0 0 0 0 0
36923 - 0 0 0 0 0 0 0 0 0 0 0 0
36924 - 0 0 0 0 0 0 6 6 6 18 18 18
36925 - 50 50 50 101 101 101 26 26 26 10 10 10
36926 -138 138 138 190 190 190 174 154 114 156 107 11
36927 -197 138 11 200 144 11 197 138 11 192 133 9
36928 -180 123 7 190 142 34 190 178 144 187 187 187
36929 -202 202 202 221 221 221 214 214 214 66 66 66
36930 - 2 2 6 2 2 6 50 50 50 62 62 62
36931 - 6 6 6 2 2 6 10 10 10 90 90 90
36932 - 50 50 50 18 18 18 6 6 6 0 0 0
36933 - 0 0 0 0 0 0 0 0 0 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 0 0 0
36936 - 0 0 0 0 0 0 0 0 0 0 0 0
36937 - 0 0 0 0 0 0 0 0 0 0 0 0
36938 - 0 0 0 0 0 0 0 0 0 0 0 0
36939 - 0 0 0 0 0 0 0 0 0 0 0 0
36940 - 0 0 0 0 0 0 0 0 0 0 0 0
36941 - 0 0 0 0 0 0 0 0 0 0 0 0
36942 - 0 0 0 0 0 0 0 0 0 0 0 0
36943 - 0 0 0 0 0 0 0 0 0 0 0 0
36944 - 0 0 0 0 0 0 10 10 10 34 34 34
36945 - 74 74 74 74 74 74 2 2 6 6 6 6
36946 -144 144 144 198 198 198 190 190 190 178 166 146
36947 -154 121 60 156 107 11 156 107 11 168 124 44
36948 -174 154 114 187 187 187 190 190 190 210 210 210
36949 -246 246 246 253 253 253 253 253 253 182 182 182
36950 - 6 6 6 2 2 6 2 2 6 2 2 6
36951 - 2 2 6 2 2 6 2 2 6 62 62 62
36952 - 74 74 74 34 34 34 14 14 14 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 0 0 0
36956 - 0 0 0 0 0 0 0 0 0 0 0 0
36957 - 0 0 0 0 0 0 0 0 0 0 0 0
36958 - 0 0 0 0 0 0 0 0 0 0 0 0
36959 - 0 0 0 0 0 0 0 0 0 0 0 0
36960 - 0 0 0 0 0 0 0 0 0 0 0 0
36961 - 0 0 0 0 0 0 0 0 0 0 0 0
36962 - 0 0 0 0 0 0 0 0 0 0 0 0
36963 - 0 0 0 0 0 0 0 0 0 0 0 0
36964 - 0 0 0 10 10 10 22 22 22 54 54 54
36965 - 94 94 94 18 18 18 2 2 6 46 46 46
36966 -234 234 234 221 221 221 190 190 190 190 190 190
36967 -190 190 190 187 187 187 187 187 187 190 190 190
36968 -190 190 190 195 195 195 214 214 214 242 242 242
36969 -253 253 253 253 253 253 253 253 253 253 253 253
36970 - 82 82 82 2 2 6 2 2 6 2 2 6
36971 - 2 2 6 2 2 6 2 2 6 14 14 14
36972 - 86 86 86 54 54 54 22 22 22 6 6 6
36973 - 0 0 0 0 0 0 0 0 0 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 0 0 0
36975 - 0 0 0 0 0 0 0 0 0 0 0 0
36976 - 0 0 0 0 0 0 0 0 0 0 0 0
36977 - 0 0 0 0 0 0 0 0 0 0 0 0
36978 - 0 0 0 0 0 0 0 0 0 0 0 0
36979 - 0 0 0 0 0 0 0 0 0 0 0 0
36980 - 0 0 0 0 0 0 0 0 0 0 0 0
36981 - 0 0 0 0 0 0 0 0 0 0 0 0
36982 - 0 0 0 0 0 0 0 0 0 0 0 0
36983 - 0 0 0 0 0 0 0 0 0 0 0 0
36984 - 6 6 6 18 18 18 46 46 46 90 90 90
36985 - 46 46 46 18 18 18 6 6 6 182 182 182
36986 -253 253 253 246 246 246 206 206 206 190 190 190
36987 -190 190 190 190 190 190 190 190 190 190 190 190
36988 -206 206 206 231 231 231 250 250 250 253 253 253
36989 -253 253 253 253 253 253 253 253 253 253 253 253
36990 -202 202 202 14 14 14 2 2 6 2 2 6
36991 - 2 2 6 2 2 6 2 2 6 2 2 6
36992 - 42 42 42 86 86 86 42 42 42 18 18 18
36993 - 6 6 6 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 0 0 0 0 0 0
36995 - 0 0 0 0 0 0 0 0 0 0 0 0
36996 - 0 0 0 0 0 0 0 0 0 0 0 0
36997 - 0 0 0 0 0 0 0 0 0 0 0 0
36998 - 0 0 0 0 0 0 0 0 0 0 0 0
36999 - 0 0 0 0 0 0 0 0 0 0 0 0
37000 - 0 0 0 0 0 0 0 0 0 0 0 0
37001 - 0 0 0 0 0 0 0 0 0 0 0 0
37002 - 0 0 0 0 0 0 0 0 0 0 0 0
37003 - 0 0 0 0 0 0 0 0 0 6 6 6
37004 - 14 14 14 38 38 38 74 74 74 66 66 66
37005 - 2 2 6 6 6 6 90 90 90 250 250 250
37006 -253 253 253 253 253 253 238 238 238 198 198 198
37007 -190 190 190 190 190 190 195 195 195 221 221 221
37008 -246 246 246 253 253 253 253 253 253 253 253 253
37009 -253 253 253 253 253 253 253 253 253 253 253 253
37010 -253 253 253 82 82 82 2 2 6 2 2 6
37011 - 2 2 6 2 2 6 2 2 6 2 2 6
37012 - 2 2 6 78 78 78 70 70 70 34 34 34
37013 - 14 14 14 6 6 6 0 0 0 0 0 0
37014 - 0 0 0 0 0 0 0 0 0 0 0 0
37015 - 0 0 0 0 0 0 0 0 0 0 0 0
37016 - 0 0 0 0 0 0 0 0 0 0 0 0
37017 - 0 0 0 0 0 0 0 0 0 0 0 0
37018 - 0 0 0 0 0 0 0 0 0 0 0 0
37019 - 0 0 0 0 0 0 0 0 0 0 0 0
37020 - 0 0 0 0 0 0 0 0 0 0 0 0
37021 - 0 0 0 0 0 0 0 0 0 0 0 0
37022 - 0 0 0 0 0 0 0 0 0 0 0 0
37023 - 0 0 0 0 0 0 0 0 0 14 14 14
37024 - 34 34 34 66 66 66 78 78 78 6 6 6
37025 - 2 2 6 18 18 18 218 218 218 253 253 253
37026 -253 253 253 253 253 253 253 253 253 246 246 246
37027 -226 226 226 231 231 231 246 246 246 253 253 253
37028 -253 253 253 253 253 253 253 253 253 253 253 253
37029 -253 253 253 253 253 253 253 253 253 253 253 253
37030 -253 253 253 178 178 178 2 2 6 2 2 6
37031 - 2 2 6 2 2 6 2 2 6 2 2 6
37032 - 2 2 6 18 18 18 90 90 90 62 62 62
37033 - 30 30 30 10 10 10 0 0 0 0 0 0
37034 - 0 0 0 0 0 0 0 0 0 0 0 0
37035 - 0 0 0 0 0 0 0 0 0 0 0 0
37036 - 0 0 0 0 0 0 0 0 0 0 0 0
37037 - 0 0 0 0 0 0 0 0 0 0 0 0
37038 - 0 0 0 0 0 0 0 0 0 0 0 0
37039 - 0 0 0 0 0 0 0 0 0 0 0 0
37040 - 0 0 0 0 0 0 0 0 0 0 0 0
37041 - 0 0 0 0 0 0 0 0 0 0 0 0
37042 - 0 0 0 0 0 0 0 0 0 0 0 0
37043 - 0 0 0 0 0 0 10 10 10 26 26 26
37044 - 58 58 58 90 90 90 18 18 18 2 2 6
37045 - 2 2 6 110 110 110 253 253 253 253 253 253
37046 -253 253 253 253 253 253 253 253 253 253 253 253
37047 -250 250 250 253 253 253 253 253 253 253 253 253
37048 -253 253 253 253 253 253 253 253 253 253 253 253
37049 -253 253 253 253 253 253 253 253 253 253 253 253
37050 -253 253 253 231 231 231 18 18 18 2 2 6
37051 - 2 2 6 2 2 6 2 2 6 2 2 6
37052 - 2 2 6 2 2 6 18 18 18 94 94 94
37053 - 54 54 54 26 26 26 10 10 10 0 0 0
37054 - 0 0 0 0 0 0 0 0 0 0 0 0
37055 - 0 0 0 0 0 0 0 0 0 0 0 0
37056 - 0 0 0 0 0 0 0 0 0 0 0 0
37057 - 0 0 0 0 0 0 0 0 0 0 0 0
37058 - 0 0 0 0 0 0 0 0 0 0 0 0
37059 - 0 0 0 0 0 0 0 0 0 0 0 0
37060 - 0 0 0 0 0 0 0 0 0 0 0 0
37061 - 0 0 0 0 0 0 0 0 0 0 0 0
37062 - 0 0 0 0 0 0 0 0 0 0 0 0
37063 - 0 0 0 6 6 6 22 22 22 50 50 50
37064 - 90 90 90 26 26 26 2 2 6 2 2 6
37065 - 14 14 14 195 195 195 250 250 250 253 253 253
37066 -253 253 253 253 253 253 253 253 253 253 253 253
37067 -253 253 253 253 253 253 253 253 253 253 253 253
37068 -253 253 253 253 253 253 253 253 253 253 253 253
37069 -253 253 253 253 253 253 253 253 253 253 253 253
37070 -250 250 250 242 242 242 54 54 54 2 2 6
37071 - 2 2 6 2 2 6 2 2 6 2 2 6
37072 - 2 2 6 2 2 6 2 2 6 38 38 38
37073 - 86 86 86 50 50 50 22 22 22 6 6 6
37074 - 0 0 0 0 0 0 0 0 0 0 0 0
37075 - 0 0 0 0 0 0 0 0 0 0 0 0
37076 - 0 0 0 0 0 0 0 0 0 0 0 0
37077 - 0 0 0 0 0 0 0 0 0 0 0 0
37078 - 0 0 0 0 0 0 0 0 0 0 0 0
37079 - 0 0 0 0 0 0 0 0 0 0 0 0
37080 - 0 0 0 0 0 0 0 0 0 0 0 0
37081 - 0 0 0 0 0 0 0 0 0 0 0 0
37082 - 0 0 0 0 0 0 0 0 0 0 0 0
37083 - 6 6 6 14 14 14 38 38 38 82 82 82
37084 - 34 34 34 2 2 6 2 2 6 2 2 6
37085 - 42 42 42 195 195 195 246 246 246 253 253 253
37086 -253 253 253 253 253 253 253 253 253 250 250 250
37087 -242 242 242 242 242 242 250 250 250 253 253 253
37088 -253 253 253 253 253 253 253 253 253 253 253 253
37089 -253 253 253 250 250 250 246 246 246 238 238 238
37090 -226 226 226 231 231 231 101 101 101 6 6 6
37091 - 2 2 6 2 2 6 2 2 6 2 2 6
37092 - 2 2 6 2 2 6 2 2 6 2 2 6
37093 - 38 38 38 82 82 82 42 42 42 14 14 14
37094 - 6 6 6 0 0 0 0 0 0 0 0 0
37095 - 0 0 0 0 0 0 0 0 0 0 0 0
37096 - 0 0 0 0 0 0 0 0 0 0 0 0
37097 - 0 0 0 0 0 0 0 0 0 0 0 0
37098 - 0 0 0 0 0 0 0 0 0 0 0 0
37099 - 0 0 0 0 0 0 0 0 0 0 0 0
37100 - 0 0 0 0 0 0 0 0 0 0 0 0
37101 - 0 0 0 0 0 0 0 0 0 0 0 0
37102 - 0 0 0 0 0 0 0 0 0 0 0 0
37103 - 10 10 10 26 26 26 62 62 62 66 66 66
37104 - 2 2 6 2 2 6 2 2 6 6 6 6
37105 - 70 70 70 170 170 170 206 206 206 234 234 234
37106 -246 246 246 250 250 250 250 250 250 238 238 238
37107 -226 226 226 231 231 231 238 238 238 250 250 250
37108 -250 250 250 250 250 250 246 246 246 231 231 231
37109 -214 214 214 206 206 206 202 202 202 202 202 202
37110 -198 198 198 202 202 202 182 182 182 18 18 18
37111 - 2 2 6 2 2 6 2 2 6 2 2 6
37112 - 2 2 6 2 2 6 2 2 6 2 2 6
37113 - 2 2 6 62 62 62 66 66 66 30 30 30
37114 - 10 10 10 0 0 0 0 0 0 0 0 0
37115 - 0 0 0 0 0 0 0 0 0 0 0 0
37116 - 0 0 0 0 0 0 0 0 0 0 0 0
37117 - 0 0 0 0 0 0 0 0 0 0 0 0
37118 - 0 0 0 0 0 0 0 0 0 0 0 0
37119 - 0 0 0 0 0 0 0 0 0 0 0 0
37120 - 0 0 0 0 0 0 0 0 0 0 0 0
37121 - 0 0 0 0 0 0 0 0 0 0 0 0
37122 - 0 0 0 0 0 0 0 0 0 0 0 0
37123 - 14 14 14 42 42 42 82 82 82 18 18 18
37124 - 2 2 6 2 2 6 2 2 6 10 10 10
37125 - 94 94 94 182 182 182 218 218 218 242 242 242
37126 -250 250 250 253 253 253 253 253 253 250 250 250
37127 -234 234 234 253 253 253 253 253 253 253 253 253
37128 -253 253 253 253 253 253 253 253 253 246 246 246
37129 -238 238 238 226 226 226 210 210 210 202 202 202
37130 -195 195 195 195 195 195 210 210 210 158 158 158
37131 - 6 6 6 14 14 14 50 50 50 14 14 14
37132 - 2 2 6 2 2 6 2 2 6 2 2 6
37133 - 2 2 6 6 6 6 86 86 86 46 46 46
37134 - 18 18 18 6 6 6 0 0 0 0 0 0
37135 - 0 0 0 0 0 0 0 0 0 0 0 0
37136 - 0 0 0 0 0 0 0 0 0 0 0 0
37137 - 0 0 0 0 0 0 0 0 0 0 0 0
37138 - 0 0 0 0 0 0 0 0 0 0 0 0
37139 - 0 0 0 0 0 0 0 0 0 0 0 0
37140 - 0 0 0 0 0 0 0 0 0 0 0 0
37141 - 0 0 0 0 0 0 0 0 0 0 0 0
37142 - 0 0 0 0 0 0 0 0 0 6 6 6
37143 - 22 22 22 54 54 54 70 70 70 2 2 6
37144 - 2 2 6 10 10 10 2 2 6 22 22 22
37145 -166 166 166 231 231 231 250 250 250 253 253 253
37146 -253 253 253 253 253 253 253 253 253 250 250 250
37147 -242 242 242 253 253 253 253 253 253 253 253 253
37148 -253 253 253 253 253 253 253 253 253 253 253 253
37149 -253 253 253 253 253 253 253 253 253 246 246 246
37150 -231 231 231 206 206 206 198 198 198 226 226 226
37151 - 94 94 94 2 2 6 6 6 6 38 38 38
37152 - 30 30 30 2 2 6 2 2 6 2 2 6
37153 - 2 2 6 2 2 6 62 62 62 66 66 66
37154 - 26 26 26 10 10 10 0 0 0 0 0 0
37155 - 0 0 0 0 0 0 0 0 0 0 0 0
37156 - 0 0 0 0 0 0 0 0 0 0 0 0
37157 - 0 0 0 0 0 0 0 0 0 0 0 0
37158 - 0 0 0 0 0 0 0 0 0 0 0 0
37159 - 0 0 0 0 0 0 0 0 0 0 0 0
37160 - 0 0 0 0 0 0 0 0 0 0 0 0
37161 - 0 0 0 0 0 0 0 0 0 0 0 0
37162 - 0 0 0 0 0 0 0 0 0 10 10 10
37163 - 30 30 30 74 74 74 50 50 50 2 2 6
37164 - 26 26 26 26 26 26 2 2 6 106 106 106
37165 -238 238 238 253 253 253 253 253 253 253 253 253
37166 -253 253 253 253 253 253 253 253 253 253 253 253
37167 -253 253 253 253 253 253 253 253 253 253 253 253
37168 -253 253 253 253 253 253 253 253 253 253 253 253
37169 -253 253 253 253 253 253 253 253 253 253 253 253
37170 -253 253 253 246 246 246 218 218 218 202 202 202
37171 -210 210 210 14 14 14 2 2 6 2 2 6
37172 - 30 30 30 22 22 22 2 2 6 2 2 6
37173 - 2 2 6 2 2 6 18 18 18 86 86 86
37174 - 42 42 42 14 14 14 0 0 0 0 0 0
37175 - 0 0 0 0 0 0 0 0 0 0 0 0
37176 - 0 0 0 0 0 0 0 0 0 0 0 0
37177 - 0 0 0 0 0 0 0 0 0 0 0 0
37178 - 0 0 0 0 0 0 0 0 0 0 0 0
37179 - 0 0 0 0 0 0 0 0 0 0 0 0
37180 - 0 0 0 0 0 0 0 0 0 0 0 0
37181 - 0 0 0 0 0 0 0 0 0 0 0 0
37182 - 0 0 0 0 0 0 0 0 0 14 14 14
37183 - 42 42 42 90 90 90 22 22 22 2 2 6
37184 - 42 42 42 2 2 6 18 18 18 218 218 218
37185 -253 253 253 253 253 253 253 253 253 253 253 253
37186 -253 253 253 253 253 253 253 253 253 253 253 253
37187 -253 253 253 253 253 253 253 253 253 253 253 253
37188 -253 253 253 253 253 253 253 253 253 253 253 253
37189 -253 253 253 253 253 253 253 253 253 253 253 253
37190 -253 253 253 253 253 253 250 250 250 221 221 221
37191 -218 218 218 101 101 101 2 2 6 14 14 14
37192 - 18 18 18 38 38 38 10 10 10 2 2 6
37193 - 2 2 6 2 2 6 2 2 6 78 78 78
37194 - 58 58 58 22 22 22 6 6 6 0 0 0
37195 - 0 0 0 0 0 0 0 0 0 0 0 0
37196 - 0 0 0 0 0 0 0 0 0 0 0 0
37197 - 0 0 0 0 0 0 0 0 0 0 0 0
37198 - 0 0 0 0 0 0 0 0 0 0 0 0
37199 - 0 0 0 0 0 0 0 0 0 0 0 0
37200 - 0 0 0 0 0 0 0 0 0 0 0 0
37201 - 0 0 0 0 0 0 0 0 0 0 0 0
37202 - 0 0 0 0 0 0 6 6 6 18 18 18
37203 - 54 54 54 82 82 82 2 2 6 26 26 26
37204 - 22 22 22 2 2 6 123 123 123 253 253 253
37205 -253 253 253 253 253 253 253 253 253 253 253 253
37206 -253 253 253 253 253 253 253 253 253 253 253 253
37207 -253 253 253 253 253 253 253 253 253 253 253 253
37208 -253 253 253 253 253 253 253 253 253 253 253 253
37209 -253 253 253 253 253 253 253 253 253 253 253 253
37210 -253 253 253 253 253 253 253 253 253 250 250 250
37211 -238 238 238 198 198 198 6 6 6 38 38 38
37212 - 58 58 58 26 26 26 38 38 38 2 2 6
37213 - 2 2 6 2 2 6 2 2 6 46 46 46
37214 - 78 78 78 30 30 30 10 10 10 0 0 0
37215 - 0 0 0 0 0 0 0 0 0 0 0 0
37216 - 0 0 0 0 0 0 0 0 0 0 0 0
37217 - 0 0 0 0 0 0 0 0 0 0 0 0
37218 - 0 0 0 0 0 0 0 0 0 0 0 0
37219 - 0 0 0 0 0 0 0 0 0 0 0 0
37220 - 0 0 0 0 0 0 0 0 0 0 0 0
37221 - 0 0 0 0 0 0 0 0 0 0 0 0
37222 - 0 0 0 0 0 0 10 10 10 30 30 30
37223 - 74 74 74 58 58 58 2 2 6 42 42 42
37224 - 2 2 6 22 22 22 231 231 231 253 253 253
37225 -253 253 253 253 253 253 253 253 253 253 253 253
37226 -253 253 253 253 253 253 253 253 253 250 250 250
37227 -253 253 253 253 253 253 253 253 253 253 253 253
37228 -253 253 253 253 253 253 253 253 253 253 253 253
37229 -253 253 253 253 253 253 253 253 253 253 253 253
37230 -253 253 253 253 253 253 253 253 253 253 253 253
37231 -253 253 253 246 246 246 46 46 46 38 38 38
37232 - 42 42 42 14 14 14 38 38 38 14 14 14
37233 - 2 2 6 2 2 6 2 2 6 6 6 6
37234 - 86 86 86 46 46 46 14 14 14 0 0 0
37235 - 0 0 0 0 0 0 0 0 0 0 0 0
37236 - 0 0 0 0 0 0 0 0 0 0 0 0
37237 - 0 0 0 0 0 0 0 0 0 0 0 0
37238 - 0 0 0 0 0 0 0 0 0 0 0 0
37239 - 0 0 0 0 0 0 0 0 0 0 0 0
37240 - 0 0 0 0 0 0 0 0 0 0 0 0
37241 - 0 0 0 0 0 0 0 0 0 0 0 0
37242 - 0 0 0 6 6 6 14 14 14 42 42 42
37243 - 90 90 90 18 18 18 18 18 18 26 26 26
37244 - 2 2 6 116 116 116 253 253 253 253 253 253
37245 -253 253 253 253 253 253 253 253 253 253 253 253
37246 -253 253 253 253 253 253 250 250 250 238 238 238
37247 -253 253 253 253 253 253 253 253 253 253 253 253
37248 -253 253 253 253 253 253 253 253 253 253 253 253
37249 -253 253 253 253 253 253 253 253 253 253 253 253
37250 -253 253 253 253 253 253 253 253 253 253 253 253
37251 -253 253 253 253 253 253 94 94 94 6 6 6
37252 - 2 2 6 2 2 6 10 10 10 34 34 34
37253 - 2 2 6 2 2 6 2 2 6 2 2 6
37254 - 74 74 74 58 58 58 22 22 22 6 6 6
37255 - 0 0 0 0 0 0 0 0 0 0 0 0
37256 - 0 0 0 0 0 0 0 0 0 0 0 0
37257 - 0 0 0 0 0 0 0 0 0 0 0 0
37258 - 0 0 0 0 0 0 0 0 0 0 0 0
37259 - 0 0 0 0 0 0 0 0 0 0 0 0
37260 - 0 0 0 0 0 0 0 0 0 0 0 0
37261 - 0 0 0 0 0 0 0 0 0 0 0 0
37262 - 0 0 0 10 10 10 26 26 26 66 66 66
37263 - 82 82 82 2 2 6 38 38 38 6 6 6
37264 - 14 14 14 210 210 210 253 253 253 253 253 253
37265 -253 253 253 253 253 253 253 253 253 253 253 253
37266 -253 253 253 253 253 253 246 246 246 242 242 242
37267 -253 253 253 253 253 253 253 253 253 253 253 253
37268 -253 253 253 253 253 253 253 253 253 253 253 253
37269 -253 253 253 253 253 253 253 253 253 253 253 253
37270 -253 253 253 253 253 253 253 253 253 253 253 253
37271 -253 253 253 253 253 253 144 144 144 2 2 6
37272 - 2 2 6 2 2 6 2 2 6 46 46 46
37273 - 2 2 6 2 2 6 2 2 6 2 2 6
37274 - 42 42 42 74 74 74 30 30 30 10 10 10
37275 - 0 0 0 0 0 0 0 0 0 0 0 0
37276 - 0 0 0 0 0 0 0 0 0 0 0 0
37277 - 0 0 0 0 0 0 0 0 0 0 0 0
37278 - 0 0 0 0 0 0 0 0 0 0 0 0
37279 - 0 0 0 0 0 0 0 0 0 0 0 0
37280 - 0 0 0 0 0 0 0 0 0 0 0 0
37281 - 0 0 0 0 0 0 0 0 0 0 0 0
37282 - 6 6 6 14 14 14 42 42 42 90 90 90
37283 - 26 26 26 6 6 6 42 42 42 2 2 6
37284 - 74 74 74 250 250 250 253 253 253 253 253 253
37285 -253 253 253 253 253 253 253 253 253 253 253 253
37286 -253 253 253 253 253 253 242 242 242 242 242 242
37287 -253 253 253 253 253 253 253 253 253 253 253 253
37288 -253 253 253 253 253 253 253 253 253 253 253 253
37289 -253 253 253 253 253 253 253 253 253 253 253 253
37290 -253 253 253 253 253 253 253 253 253 253 253 253
37291 -253 253 253 253 253 253 182 182 182 2 2 6
37292 - 2 2 6 2 2 6 2 2 6 46 46 46
37293 - 2 2 6 2 2 6 2 2 6 2 2 6
37294 - 10 10 10 86 86 86 38 38 38 10 10 10
37295 - 0 0 0 0 0 0 0 0 0 0 0 0
37296 - 0 0 0 0 0 0 0 0 0 0 0 0
37297 - 0 0 0 0 0 0 0 0 0 0 0 0
37298 - 0 0 0 0 0 0 0 0 0 0 0 0
37299 - 0 0 0 0 0 0 0 0 0 0 0 0
37300 - 0 0 0 0 0 0 0 0 0 0 0 0
37301 - 0 0 0 0 0 0 0 0 0 0 0 0
37302 - 10 10 10 26 26 26 66 66 66 82 82 82
37303 - 2 2 6 22 22 22 18 18 18 2 2 6
37304 -149 149 149 253 253 253 253 253 253 253 253 253
37305 -253 253 253 253 253 253 253 253 253 253 253 253
37306 -253 253 253 253 253 253 234 234 234 242 242 242
37307 -253 253 253 253 253 253 253 253 253 253 253 253
37308 -253 253 253 253 253 253 253 253 253 253 253 253
37309 -253 253 253 253 253 253 253 253 253 253 253 253
37310 -253 253 253 253 253 253 253 253 253 253 253 253
37311 -253 253 253 253 253 253 206 206 206 2 2 6
37312 - 2 2 6 2 2 6 2 2 6 38 38 38
37313 - 2 2 6 2 2 6 2 2 6 2 2 6
37314 - 6 6 6 86 86 86 46 46 46 14 14 14
37315 - 0 0 0 0 0 0 0 0 0 0 0 0
37316 - 0 0 0 0 0 0 0 0 0 0 0 0
37317 - 0 0 0 0 0 0 0 0 0 0 0 0
37318 - 0 0 0 0 0 0 0 0 0 0 0 0
37319 - 0 0 0 0 0 0 0 0 0 0 0 0
37320 - 0 0 0 0 0 0 0 0 0 0 0 0
37321 - 0 0 0 0 0 0 0 0 0 6 6 6
37322 - 18 18 18 46 46 46 86 86 86 18 18 18
37323 - 2 2 6 34 34 34 10 10 10 6 6 6
37324 -210 210 210 253 253 253 253 253 253 253 253 253
37325 -253 253 253 253 253 253 253 253 253 253 253 253
37326 -253 253 253 253 253 253 234 234 234 242 242 242
37327 -253 253 253 253 253 253 253 253 253 253 253 253
37328 -253 253 253 253 253 253 253 253 253 253 253 253
37329 -253 253 253 253 253 253 253 253 253 253 253 253
37330 -253 253 253 253 253 253 253 253 253 253 253 253
37331 -253 253 253 253 253 253 221 221 221 6 6 6
37332 - 2 2 6 2 2 6 6 6 6 30 30 30
37333 - 2 2 6 2 2 6 2 2 6 2 2 6
37334 - 2 2 6 82 82 82 54 54 54 18 18 18
37335 - 6 6 6 0 0 0 0 0 0 0 0 0
37336 - 0 0 0 0 0 0 0 0 0 0 0 0
37337 - 0 0 0 0 0 0 0 0 0 0 0 0
37338 - 0 0 0 0 0 0 0 0 0 0 0 0
37339 - 0 0 0 0 0 0 0 0 0 0 0 0
37340 - 0 0 0 0 0 0 0 0 0 0 0 0
37341 - 0 0 0 0 0 0 0 0 0 10 10 10
37342 - 26 26 26 66 66 66 62 62 62 2 2 6
37343 - 2 2 6 38 38 38 10 10 10 26 26 26
37344 -238 238 238 253 253 253 253 253 253 253 253 253
37345 -253 253 253 253 253 253 253 253 253 253 253 253
37346 -253 253 253 253 253 253 231 231 231 238 238 238
37347 -253 253 253 253 253 253 253 253 253 253 253 253
37348 -253 253 253 253 253 253 253 253 253 253 253 253
37349 -253 253 253 253 253 253 253 253 253 253 253 253
37350 -253 253 253 253 253 253 253 253 253 253 253 253
37351 -253 253 253 253 253 253 231 231 231 6 6 6
37352 - 2 2 6 2 2 6 10 10 10 30 30 30
37353 - 2 2 6 2 2 6 2 2 6 2 2 6
37354 - 2 2 6 66 66 66 58 58 58 22 22 22
37355 - 6 6 6 0 0 0 0 0 0 0 0 0
37356 - 0 0 0 0 0 0 0 0 0 0 0 0
37357 - 0 0 0 0 0 0 0 0 0 0 0 0
37358 - 0 0 0 0 0 0 0 0 0 0 0 0
37359 - 0 0 0 0 0 0 0 0 0 0 0 0
37360 - 0 0 0 0 0 0 0 0 0 0 0 0
37361 - 0 0 0 0 0 0 0 0 0 10 10 10
37362 - 38 38 38 78 78 78 6 6 6 2 2 6
37363 - 2 2 6 46 46 46 14 14 14 42 42 42
37364 -246 246 246 253 253 253 253 253 253 253 253 253
37365 -253 253 253 253 253 253 253 253 253 253 253 253
37366 -253 253 253 253 253 253 231 231 231 242 242 242
37367 -253 253 253 253 253 253 253 253 253 253 253 253
37368 -253 253 253 253 253 253 253 253 253 253 253 253
37369 -253 253 253 253 253 253 253 253 253 253 253 253
37370 -253 253 253 253 253 253 253 253 253 253 253 253
37371 -253 253 253 253 253 253 234 234 234 10 10 10
37372 - 2 2 6 2 2 6 22 22 22 14 14 14
37373 - 2 2 6 2 2 6 2 2 6 2 2 6
37374 - 2 2 6 66 66 66 62 62 62 22 22 22
37375 - 6 6 6 0 0 0 0 0 0 0 0 0
37376 - 0 0 0 0 0 0 0 0 0 0 0 0
37377 - 0 0 0 0 0 0 0 0 0 0 0 0
37378 - 0 0 0 0 0 0 0 0 0 0 0 0
37379 - 0 0 0 0 0 0 0 0 0 0 0 0
37380 - 0 0 0 0 0 0 0 0 0 0 0 0
37381 - 0 0 0 0 0 0 6 6 6 18 18 18
37382 - 50 50 50 74 74 74 2 2 6 2 2 6
37383 - 14 14 14 70 70 70 34 34 34 62 62 62
37384 -250 250 250 253 253 253 253 253 253 253 253 253
37385 -253 253 253 253 253 253 253 253 253 253 253 253
37386 -253 253 253 253 253 253 231 231 231 246 246 246
37387 -253 253 253 253 253 253 253 253 253 253 253 253
37388 -253 253 253 253 253 253 253 253 253 253 253 253
37389 -253 253 253 253 253 253 253 253 253 253 253 253
37390 -253 253 253 253 253 253 253 253 253 253 253 253
37391 -253 253 253 253 253 253 234 234 234 14 14 14
37392 - 2 2 6 2 2 6 30 30 30 2 2 6
37393 - 2 2 6 2 2 6 2 2 6 2 2 6
37394 - 2 2 6 66 66 66 62 62 62 22 22 22
37395 - 6 6 6 0 0 0 0 0 0 0 0 0
37396 - 0 0 0 0 0 0 0 0 0 0 0 0
37397 - 0 0 0 0 0 0 0 0 0 0 0 0
37398 - 0 0 0 0 0 0 0 0 0 0 0 0
37399 - 0 0 0 0 0 0 0 0 0 0 0 0
37400 - 0 0 0 0 0 0 0 0 0 0 0 0
37401 - 0 0 0 0 0 0 6 6 6 18 18 18
37402 - 54 54 54 62 62 62 2 2 6 2 2 6
37403 - 2 2 6 30 30 30 46 46 46 70 70 70
37404 -250 250 250 253 253 253 253 253 253 253 253 253
37405 -253 253 253 253 253 253 253 253 253 253 253 253
37406 -253 253 253 253 253 253 231 231 231 246 246 246
37407 -253 253 253 253 253 253 253 253 253 253 253 253
37408 -253 253 253 253 253 253 253 253 253 253 253 253
37409 -253 253 253 253 253 253 253 253 253 253 253 253
37410 -253 253 253 253 253 253 253 253 253 253 253 253
37411 -253 253 253 253 253 253 226 226 226 10 10 10
37412 - 2 2 6 6 6 6 30 30 30 2 2 6
37413 - 2 2 6 2 2 6 2 2 6 2 2 6
37414 - 2 2 6 66 66 66 58 58 58 22 22 22
37415 - 6 6 6 0 0 0 0 0 0 0 0 0
37416 - 0 0 0 0 0 0 0 0 0 0 0 0
37417 - 0 0 0 0 0 0 0 0 0 0 0 0
37418 - 0 0 0 0 0 0 0 0 0 0 0 0
37419 - 0 0 0 0 0 0 0 0 0 0 0 0
37420 - 0 0 0 0 0 0 0 0 0 0 0 0
37421 - 0 0 0 0 0 0 6 6 6 22 22 22
37422 - 58 58 58 62 62 62 2 2 6 2 2 6
37423 - 2 2 6 2 2 6 30 30 30 78 78 78
37424 -250 250 250 253 253 253 253 253 253 253 253 253
37425 -253 253 253 253 253 253 253 253 253 253 253 253
37426 -253 253 253 253 253 253 231 231 231 246 246 246
37427 -253 253 253 253 253 253 253 253 253 253 253 253
37428 -253 253 253 253 253 253 253 253 253 253 253 253
37429 -253 253 253 253 253 253 253 253 253 253 253 253
37430 -253 253 253 253 253 253 253 253 253 253 253 253
37431 -253 253 253 253 253 253 206 206 206 2 2 6
37432 - 22 22 22 34 34 34 18 14 6 22 22 22
37433 - 26 26 26 18 18 18 6 6 6 2 2 6
37434 - 2 2 6 82 82 82 54 54 54 18 18 18
37435 - 6 6 6 0 0 0 0 0 0 0 0 0
37436 - 0 0 0 0 0 0 0 0 0 0 0 0
37437 - 0 0 0 0 0 0 0 0 0 0 0 0
37438 - 0 0 0 0 0 0 0 0 0 0 0 0
37439 - 0 0 0 0 0 0 0 0 0 0 0 0
37440 - 0 0 0 0 0 0 0 0 0 0 0 0
37441 - 0 0 0 0 0 0 6 6 6 26 26 26
37442 - 62 62 62 106 106 106 74 54 14 185 133 11
37443 -210 162 10 121 92 8 6 6 6 62 62 62
37444 -238 238 238 253 253 253 253 253 253 253 253 253
37445 -253 253 253 253 253 253 253 253 253 253 253 253
37446 -253 253 253 253 253 253 231 231 231 246 246 246
37447 -253 253 253 253 253 253 253 253 253 253 253 253
37448 -253 253 253 253 253 253 253 253 253 253 253 253
37449 -253 253 253 253 253 253 253 253 253 253 253 253
37450 -253 253 253 253 253 253 253 253 253 253 253 253
37451 -253 253 253 253 253 253 158 158 158 18 18 18
37452 - 14 14 14 2 2 6 2 2 6 2 2 6
37453 - 6 6 6 18 18 18 66 66 66 38 38 38
37454 - 6 6 6 94 94 94 50 50 50 18 18 18
37455 - 6 6 6 0 0 0 0 0 0 0 0 0
37456 - 0 0 0 0 0 0 0 0 0 0 0 0
37457 - 0 0 0 0 0 0 0 0 0 0 0 0
37458 - 0 0 0 0 0 0 0 0 0 0 0 0
37459 - 0 0 0 0 0 0 0 0 0 0 0 0
37460 - 0 0 0 0 0 0 0 0 0 6 6 6
37461 - 10 10 10 10 10 10 18 18 18 38 38 38
37462 - 78 78 78 142 134 106 216 158 10 242 186 14
37463 -246 190 14 246 190 14 156 118 10 10 10 10
37464 - 90 90 90 238 238 238 253 253 253 253 253 253
37465 -253 253 253 253 253 253 253 253 253 253 253 253
37466 -253 253 253 253 253 253 231 231 231 250 250 250
37467 -253 253 253 253 253 253 253 253 253 253 253 253
37468 -253 253 253 253 253 253 253 253 253 253 253 253
37469 -253 253 253 253 253 253 253 253 253 253 253 253
37470 -253 253 253 253 253 253 253 253 253 246 230 190
37471 -238 204 91 238 204 91 181 142 44 37 26 9
37472 - 2 2 6 2 2 6 2 2 6 2 2 6
37473 - 2 2 6 2 2 6 38 38 38 46 46 46
37474 - 26 26 26 106 106 106 54 54 54 18 18 18
37475 - 6 6 6 0 0 0 0 0 0 0 0 0
37476 - 0 0 0 0 0 0 0 0 0 0 0 0
37477 - 0 0 0 0 0 0 0 0 0 0 0 0
37478 - 0 0 0 0 0 0 0 0 0 0 0 0
37479 - 0 0 0 0 0 0 0 0 0 0 0 0
37480 - 0 0 0 6 6 6 14 14 14 22 22 22
37481 - 30 30 30 38 38 38 50 50 50 70 70 70
37482 -106 106 106 190 142 34 226 170 11 242 186 14
37483 -246 190 14 246 190 14 246 190 14 154 114 10
37484 - 6 6 6 74 74 74 226 226 226 253 253 253
37485 -253 253 253 253 253 253 253 253 253 253 253 253
37486 -253 253 253 253 253 253 231 231 231 250 250 250
37487 -253 253 253 253 253 253 253 253 253 253 253 253
37488 -253 253 253 253 253 253 253 253 253 253 253 253
37489 -253 253 253 253 253 253 253 253 253 253 253 253
37490 -253 253 253 253 253 253 253 253 253 228 184 62
37491 -241 196 14 241 208 19 232 195 16 38 30 10
37492 - 2 2 6 2 2 6 2 2 6 2 2 6
37493 - 2 2 6 6 6 6 30 30 30 26 26 26
37494 -203 166 17 154 142 90 66 66 66 26 26 26
37495 - 6 6 6 0 0 0 0 0 0 0 0 0
37496 - 0 0 0 0 0 0 0 0 0 0 0 0
37497 - 0 0 0 0 0 0 0 0 0 0 0 0
37498 - 0 0 0 0 0 0 0 0 0 0 0 0
37499 - 0 0 0 0 0 0 0 0 0 0 0 0
37500 - 6 6 6 18 18 18 38 38 38 58 58 58
37501 - 78 78 78 86 86 86 101 101 101 123 123 123
37502 -175 146 61 210 150 10 234 174 13 246 186 14
37503 -246 190 14 246 190 14 246 190 14 238 190 10
37504 -102 78 10 2 2 6 46 46 46 198 198 198
37505 -253 253 253 253 253 253 253 253 253 253 253 253
37506 -253 253 253 253 253 253 234 234 234 242 242 242
37507 -253 253 253 253 253 253 253 253 253 253 253 253
37508 -253 253 253 253 253 253 253 253 253 253 253 253
37509 -253 253 253 253 253 253 253 253 253 253 253 253
37510 -253 253 253 253 253 253 253 253 253 224 178 62
37511 -242 186 14 241 196 14 210 166 10 22 18 6
37512 - 2 2 6 2 2 6 2 2 6 2 2 6
37513 - 2 2 6 2 2 6 6 6 6 121 92 8
37514 -238 202 15 232 195 16 82 82 82 34 34 34
37515 - 10 10 10 0 0 0 0 0 0 0 0 0
37516 - 0 0 0 0 0 0 0 0 0 0 0 0
37517 - 0 0 0 0 0 0 0 0 0 0 0 0
37518 - 0 0 0 0 0 0 0 0 0 0 0 0
37519 - 0 0 0 0 0 0 0 0 0 0 0 0
37520 - 14 14 14 38 38 38 70 70 70 154 122 46
37521 -190 142 34 200 144 11 197 138 11 197 138 11
37522 -213 154 11 226 170 11 242 186 14 246 190 14
37523 -246 190 14 246 190 14 246 190 14 246 190 14
37524 -225 175 15 46 32 6 2 2 6 22 22 22
37525 -158 158 158 250 250 250 253 253 253 253 253 253
37526 -253 253 253 253 253 253 253 253 253 253 253 253
37527 -253 253 253 253 253 253 253 253 253 253 253 253
37528 -253 253 253 253 253 253 253 253 253 253 253 253
37529 -253 253 253 253 253 253 253 253 253 253 253 253
37530 -253 253 253 250 250 250 242 242 242 224 178 62
37531 -239 182 13 236 186 11 213 154 11 46 32 6
37532 - 2 2 6 2 2 6 2 2 6 2 2 6
37533 - 2 2 6 2 2 6 61 42 6 225 175 15
37534 -238 190 10 236 186 11 112 100 78 42 42 42
37535 - 14 14 14 0 0 0 0 0 0 0 0 0
37536 - 0 0 0 0 0 0 0 0 0 0 0 0
37537 - 0 0 0 0 0 0 0 0 0 0 0 0
37538 - 0 0 0 0 0 0 0 0 0 0 0 0
37539 - 0 0 0 0 0 0 0 0 0 6 6 6
37540 - 22 22 22 54 54 54 154 122 46 213 154 11
37541 -226 170 11 230 174 11 226 170 11 226 170 11
37542 -236 178 12 242 186 14 246 190 14 246 190 14
37543 -246 190 14 246 190 14 246 190 14 246 190 14
37544 -241 196 14 184 144 12 10 10 10 2 2 6
37545 - 6 6 6 116 116 116 242 242 242 253 253 253
37546 -253 253 253 253 253 253 253 253 253 253 253 253
37547 -253 253 253 253 253 253 253 253 253 253 253 253
37548 -253 253 253 253 253 253 253 253 253 253 253 253
37549 -253 253 253 253 253 253 253 253 253 253 253 253
37550 -253 253 253 231 231 231 198 198 198 214 170 54
37551 -236 178 12 236 178 12 210 150 10 137 92 6
37552 - 18 14 6 2 2 6 2 2 6 2 2 6
37553 - 6 6 6 70 47 6 200 144 11 236 178 12
37554 -239 182 13 239 182 13 124 112 88 58 58 58
37555 - 22 22 22 6 6 6 0 0 0 0 0 0
37556 - 0 0 0 0 0 0 0 0 0 0 0 0
37557 - 0 0 0 0 0 0 0 0 0 0 0 0
37558 - 0 0 0 0 0 0 0 0 0 0 0 0
37559 - 0 0 0 0 0 0 0 0 0 10 10 10
37560 - 30 30 30 70 70 70 180 133 36 226 170 11
37561 -239 182 13 242 186 14 242 186 14 246 186 14
37562 -246 190 14 246 190 14 246 190 14 246 190 14
37563 -246 190 14 246 190 14 246 190 14 246 190 14
37564 -246 190 14 232 195 16 98 70 6 2 2 6
37565 - 2 2 6 2 2 6 66 66 66 221 221 221
37566 -253 253 253 253 253 253 253 253 253 253 253 253
37567 -253 253 253 253 253 253 253 253 253 253 253 253
37568 -253 253 253 253 253 253 253 253 253 253 253 253
37569 -253 253 253 253 253 253 253 253 253 253 253 253
37570 -253 253 253 206 206 206 198 198 198 214 166 58
37571 -230 174 11 230 174 11 216 158 10 192 133 9
37572 -163 110 8 116 81 8 102 78 10 116 81 8
37573 -167 114 7 197 138 11 226 170 11 239 182 13
37574 -242 186 14 242 186 14 162 146 94 78 78 78
37575 - 34 34 34 14 14 14 6 6 6 0 0 0
37576 - 0 0 0 0 0 0 0 0 0 0 0 0
37577 - 0 0 0 0 0 0 0 0 0 0 0 0
37578 - 0 0 0 0 0 0 0 0 0 0 0 0
37579 - 0 0 0 0 0 0 0 0 0 6 6 6
37580 - 30 30 30 78 78 78 190 142 34 226 170 11
37581 -239 182 13 246 190 14 246 190 14 246 190 14
37582 -246 190 14 246 190 14 246 190 14 246 190 14
37583 -246 190 14 246 190 14 246 190 14 246 190 14
37584 -246 190 14 241 196 14 203 166 17 22 18 6
37585 - 2 2 6 2 2 6 2 2 6 38 38 38
37586 -218 218 218 253 253 253 253 253 253 253 253 253
37587 -253 253 253 253 253 253 253 253 253 253 253 253
37588 -253 253 253 253 253 253 253 253 253 253 253 253
37589 -253 253 253 253 253 253 253 253 253 253 253 253
37590 -250 250 250 206 206 206 198 198 198 202 162 69
37591 -226 170 11 236 178 12 224 166 10 210 150 10
37592 -200 144 11 197 138 11 192 133 9 197 138 11
37593 -210 150 10 226 170 11 242 186 14 246 190 14
37594 -246 190 14 246 186 14 225 175 15 124 112 88
37595 - 62 62 62 30 30 30 14 14 14 6 6 6
37596 - 0 0 0 0 0 0 0 0 0 0 0 0
37597 - 0 0 0 0 0 0 0 0 0 0 0 0
37598 - 0 0 0 0 0 0 0 0 0 0 0 0
37599 - 0 0 0 0 0 0 0 0 0 10 10 10
37600 - 30 30 30 78 78 78 174 135 50 224 166 10
37601 -239 182 13 246 190 14 246 190 14 246 190 14
37602 -246 190 14 246 190 14 246 190 14 246 190 14
37603 -246 190 14 246 190 14 246 190 14 246 190 14
37604 -246 190 14 246 190 14 241 196 14 139 102 15
37605 - 2 2 6 2 2 6 2 2 6 2 2 6
37606 - 78 78 78 250 250 250 253 253 253 253 253 253
37607 -253 253 253 253 253 253 253 253 253 253 253 253
37608 -253 253 253 253 253 253 253 253 253 253 253 253
37609 -253 253 253 253 253 253 253 253 253 253 253 253
37610 -250 250 250 214 214 214 198 198 198 190 150 46
37611 -219 162 10 236 178 12 234 174 13 224 166 10
37612 -216 158 10 213 154 11 213 154 11 216 158 10
37613 -226 170 11 239 182 13 246 190 14 246 190 14
37614 -246 190 14 246 190 14 242 186 14 206 162 42
37615 -101 101 101 58 58 58 30 30 30 14 14 14
37616 - 6 6 6 0 0 0 0 0 0 0 0 0
37617 - 0 0 0 0 0 0 0 0 0 0 0 0
37618 - 0 0 0 0 0 0 0 0 0 0 0 0
37619 - 0 0 0 0 0 0 0 0 0 10 10 10
37620 - 30 30 30 74 74 74 174 135 50 216 158 10
37621 -236 178 12 246 190 14 246 190 14 246 190 14
37622 -246 190 14 246 190 14 246 190 14 246 190 14
37623 -246 190 14 246 190 14 246 190 14 246 190 14
37624 -246 190 14 246 190 14 241 196 14 226 184 13
37625 - 61 42 6 2 2 6 2 2 6 2 2 6
37626 - 22 22 22 238 238 238 253 253 253 253 253 253
37627 -253 253 253 253 253 253 253 253 253 253 253 253
37628 -253 253 253 253 253 253 253 253 253 253 253 253
37629 -253 253 253 253 253 253 253 253 253 253 253 253
37630 -253 253 253 226 226 226 187 187 187 180 133 36
37631 -216 158 10 236 178 12 239 182 13 236 178 12
37632 -230 174 11 226 170 11 226 170 11 230 174 11
37633 -236 178 12 242 186 14 246 190 14 246 190 14
37634 -246 190 14 246 190 14 246 186 14 239 182 13
37635 -206 162 42 106 106 106 66 66 66 34 34 34
37636 - 14 14 14 6 6 6 0 0 0 0 0 0
37637 - 0 0 0 0 0 0 0 0 0 0 0 0
37638 - 0 0 0 0 0 0 0 0 0 0 0 0
37639 - 0 0 0 0 0 0 0 0 0 6 6 6
37640 - 26 26 26 70 70 70 163 133 67 213 154 11
37641 -236 178 12 246 190 14 246 190 14 246 190 14
37642 -246 190 14 246 190 14 246 190 14 246 190 14
37643 -246 190 14 246 190 14 246 190 14 246 190 14
37644 -246 190 14 246 190 14 246 190 14 241 196 14
37645 -190 146 13 18 14 6 2 2 6 2 2 6
37646 - 46 46 46 246 246 246 253 253 253 253 253 253
37647 -253 253 253 253 253 253 253 253 253 253 253 253
37648 -253 253 253 253 253 253 253 253 253 253 253 253
37649 -253 253 253 253 253 253 253 253 253 253 253 253
37650 -253 253 253 221 221 221 86 86 86 156 107 11
37651 -216 158 10 236 178 12 242 186 14 246 186 14
37652 -242 186 14 239 182 13 239 182 13 242 186 14
37653 -242 186 14 246 186 14 246 190 14 246 190 14
37654 -246 190 14 246 190 14 246 190 14 246 190 14
37655 -242 186 14 225 175 15 142 122 72 66 66 66
37656 - 30 30 30 10 10 10 0 0 0 0 0 0
37657 - 0 0 0 0 0 0 0 0 0 0 0 0
37658 - 0 0 0 0 0 0 0 0 0 0 0 0
37659 - 0 0 0 0 0 0 0 0 0 6 6 6
37660 - 26 26 26 70 70 70 163 133 67 210 150 10
37661 -236 178 12 246 190 14 246 190 14 246 190 14
37662 -246 190 14 246 190 14 246 190 14 246 190 14
37663 -246 190 14 246 190 14 246 190 14 246 190 14
37664 -246 190 14 246 190 14 246 190 14 246 190 14
37665 -232 195 16 121 92 8 34 34 34 106 106 106
37666 -221 221 221 253 253 253 253 253 253 253 253 253
37667 -253 253 253 253 253 253 253 253 253 253 253 253
37668 -253 253 253 253 253 253 253 253 253 253 253 253
37669 -253 253 253 253 253 253 253 253 253 253 253 253
37670 -242 242 242 82 82 82 18 14 6 163 110 8
37671 -216 158 10 236 178 12 242 186 14 246 190 14
37672 -246 190 14 246 190 14 246 190 14 246 190 14
37673 -246 190 14 246 190 14 246 190 14 246 190 14
37674 -246 190 14 246 190 14 246 190 14 246 190 14
37675 -246 190 14 246 190 14 242 186 14 163 133 67
37676 - 46 46 46 18 18 18 6 6 6 0 0 0
37677 - 0 0 0 0 0 0 0 0 0 0 0 0
37678 - 0 0 0 0 0 0 0 0 0 0 0 0
37679 - 0 0 0 0 0 0 0 0 0 10 10 10
37680 - 30 30 30 78 78 78 163 133 67 210 150 10
37681 -236 178 12 246 186 14 246 190 14 246 190 14
37682 -246 190 14 246 190 14 246 190 14 246 190 14
37683 -246 190 14 246 190 14 246 190 14 246 190 14
37684 -246 190 14 246 190 14 246 190 14 246 190 14
37685 -241 196 14 215 174 15 190 178 144 253 253 253
37686 -253 253 253 253 253 253 253 253 253 253 253 253
37687 -253 253 253 253 253 253 253 253 253 253 253 253
37688 -253 253 253 253 253 253 253 253 253 253 253 253
37689 -253 253 253 253 253 253 253 253 253 218 218 218
37690 - 58 58 58 2 2 6 22 18 6 167 114 7
37691 -216 158 10 236 178 12 246 186 14 246 190 14
37692 -246 190 14 246 190 14 246 190 14 246 190 14
37693 -246 190 14 246 190 14 246 190 14 246 190 14
37694 -246 190 14 246 190 14 246 190 14 246 190 14
37695 -246 190 14 246 186 14 242 186 14 190 150 46
37696 - 54 54 54 22 22 22 6 6 6 0 0 0
37697 - 0 0 0 0 0 0 0 0 0 0 0 0
37698 - 0 0 0 0 0 0 0 0 0 0 0 0
37699 - 0 0 0 0 0 0 0 0 0 14 14 14
37700 - 38 38 38 86 86 86 180 133 36 213 154 11
37701 -236 178 12 246 186 14 246 190 14 246 190 14
37702 -246 190 14 246 190 14 246 190 14 246 190 14
37703 -246 190 14 246 190 14 246 190 14 246 190 14
37704 -246 190 14 246 190 14 246 190 14 246 190 14
37705 -246 190 14 232 195 16 190 146 13 214 214 214
37706 -253 253 253 253 253 253 253 253 253 253 253 253
37707 -253 253 253 253 253 253 253 253 253 253 253 253
37708 -253 253 253 253 253 253 253 253 253 253 253 253
37709 -253 253 253 250 250 250 170 170 170 26 26 26
37710 - 2 2 6 2 2 6 37 26 9 163 110 8
37711 -219 162 10 239 182 13 246 186 14 246 190 14
37712 -246 190 14 246 190 14 246 190 14 246 190 14
37713 -246 190 14 246 190 14 246 190 14 246 190 14
37714 -246 190 14 246 190 14 246 190 14 246 190 14
37715 -246 186 14 236 178 12 224 166 10 142 122 72
37716 - 46 46 46 18 18 18 6 6 6 0 0 0
37717 - 0 0 0 0 0 0 0 0 0 0 0 0
37718 - 0 0 0 0 0 0 0 0 0 0 0 0
37719 - 0 0 0 0 0 0 6 6 6 18 18 18
37720 - 50 50 50 109 106 95 192 133 9 224 166 10
37721 -242 186 14 246 190 14 246 190 14 246 190 14
37722 -246 190 14 246 190 14 246 190 14 246 190 14
37723 -246 190 14 246 190 14 246 190 14 246 190 14
37724 -246 190 14 246 190 14 246 190 14 246 190 14
37725 -242 186 14 226 184 13 210 162 10 142 110 46
37726 -226 226 226 253 253 253 253 253 253 253 253 253
37727 -253 253 253 253 253 253 253 253 253 253 253 253
37728 -253 253 253 253 253 253 253 253 253 253 253 253
37729 -198 198 198 66 66 66 2 2 6 2 2 6
37730 - 2 2 6 2 2 6 50 34 6 156 107 11
37731 -219 162 10 239 182 13 246 186 14 246 190 14
37732 -246 190 14 246 190 14 246 190 14 246 190 14
37733 -246 190 14 246 190 14 246 190 14 246 190 14
37734 -246 190 14 246 190 14 246 190 14 242 186 14
37735 -234 174 13 213 154 11 154 122 46 66 66 66
37736 - 30 30 30 10 10 10 0 0 0 0 0 0
37737 - 0 0 0 0 0 0 0 0 0 0 0 0
37738 - 0 0 0 0 0 0 0 0 0 0 0 0
37739 - 0 0 0 0 0 0 6 6 6 22 22 22
37740 - 58 58 58 154 121 60 206 145 10 234 174 13
37741 -242 186 14 246 186 14 246 190 14 246 190 14
37742 -246 190 14 246 190 14 246 190 14 246 190 14
37743 -246 190 14 246 190 14 246 190 14 246 190 14
37744 -246 190 14 246 190 14 246 190 14 246 190 14
37745 -246 186 14 236 178 12 210 162 10 163 110 8
37746 - 61 42 6 138 138 138 218 218 218 250 250 250
37747 -253 253 253 253 253 253 253 253 253 250 250 250
37748 -242 242 242 210 210 210 144 144 144 66 66 66
37749 - 6 6 6 2 2 6 2 2 6 2 2 6
37750 - 2 2 6 2 2 6 61 42 6 163 110 8
37751 -216 158 10 236 178 12 246 190 14 246 190 14
37752 -246 190 14 246 190 14 246 190 14 246 190 14
37753 -246 190 14 246 190 14 246 190 14 246 190 14
37754 -246 190 14 239 182 13 230 174 11 216 158 10
37755 -190 142 34 124 112 88 70 70 70 38 38 38
37756 - 18 18 18 6 6 6 0 0 0 0 0 0
37757 - 0 0 0 0 0 0 0 0 0 0 0 0
37758 - 0 0 0 0 0 0 0 0 0 0 0 0
37759 - 0 0 0 0 0 0 6 6 6 22 22 22
37760 - 62 62 62 168 124 44 206 145 10 224 166 10
37761 -236 178 12 239 182 13 242 186 14 242 186 14
37762 -246 186 14 246 190 14 246 190 14 246 190 14
37763 -246 190 14 246 190 14 246 190 14 246 190 14
37764 -246 190 14 246 190 14 246 190 14 246 190 14
37765 -246 190 14 236 178 12 216 158 10 175 118 6
37766 - 80 54 7 2 2 6 6 6 6 30 30 30
37767 - 54 54 54 62 62 62 50 50 50 38 38 38
37768 - 14 14 14 2 2 6 2 2 6 2 2 6
37769 - 2 2 6 2 2 6 2 2 6 2 2 6
37770 - 2 2 6 6 6 6 80 54 7 167 114 7
37771 -213 154 11 236 178 12 246 190 14 246 190 14
37772 -246 190 14 246 190 14 246 190 14 246 190 14
37773 -246 190 14 242 186 14 239 182 13 239 182 13
37774 -230 174 11 210 150 10 174 135 50 124 112 88
37775 - 82 82 82 54 54 54 34 34 34 18 18 18
37776 - 6 6 6 0 0 0 0 0 0 0 0 0
37777 - 0 0 0 0 0 0 0 0 0 0 0 0
37778 - 0 0 0 0 0 0 0 0 0 0 0 0
37779 - 0 0 0 0 0 0 6 6 6 18 18 18
37780 - 50 50 50 158 118 36 192 133 9 200 144 11
37781 -216 158 10 219 162 10 224 166 10 226 170 11
37782 -230 174 11 236 178 12 239 182 13 239 182 13
37783 -242 186 14 246 186 14 246 190 14 246 190 14
37784 -246 190 14 246 190 14 246 190 14 246 190 14
37785 -246 186 14 230 174 11 210 150 10 163 110 8
37786 -104 69 6 10 10 10 2 2 6 2 2 6
37787 - 2 2 6 2 2 6 2 2 6 2 2 6
37788 - 2 2 6 2 2 6 2 2 6 2 2 6
37789 - 2 2 6 2 2 6 2 2 6 2 2 6
37790 - 2 2 6 6 6 6 91 60 6 167 114 7
37791 -206 145 10 230 174 11 242 186 14 246 190 14
37792 -246 190 14 246 190 14 246 186 14 242 186 14
37793 -239 182 13 230 174 11 224 166 10 213 154 11
37794 -180 133 36 124 112 88 86 86 86 58 58 58
37795 - 38 38 38 22 22 22 10 10 10 6 6 6
37796 - 0 0 0 0 0 0 0 0 0 0 0 0
37797 - 0 0 0 0 0 0 0 0 0 0 0 0
37798 - 0 0 0 0 0 0 0 0 0 0 0 0
37799 - 0 0 0 0 0 0 0 0 0 14 14 14
37800 - 34 34 34 70 70 70 138 110 50 158 118 36
37801 -167 114 7 180 123 7 192 133 9 197 138 11
37802 -200 144 11 206 145 10 213 154 11 219 162 10
37803 -224 166 10 230 174 11 239 182 13 242 186 14
37804 -246 186 14 246 186 14 246 186 14 246 186 14
37805 -239 182 13 216 158 10 185 133 11 152 99 6
37806 -104 69 6 18 14 6 2 2 6 2 2 6
37807 - 2 2 6 2 2 6 2 2 6 2 2 6
37808 - 2 2 6 2 2 6 2 2 6 2 2 6
37809 - 2 2 6 2 2 6 2 2 6 2 2 6
37810 - 2 2 6 6 6 6 80 54 7 152 99 6
37811 -192 133 9 219 162 10 236 178 12 239 182 13
37812 -246 186 14 242 186 14 239 182 13 236 178 12
37813 -224 166 10 206 145 10 192 133 9 154 121 60
37814 - 94 94 94 62 62 62 42 42 42 22 22 22
37815 - 14 14 14 6 6 6 0 0 0 0 0 0
37816 - 0 0 0 0 0 0 0 0 0 0 0 0
37817 - 0 0 0 0 0 0 0 0 0 0 0 0
37818 - 0 0 0 0 0 0 0 0 0 0 0 0
37819 - 0 0 0 0 0 0 0 0 0 6 6 6
37820 - 18 18 18 34 34 34 58 58 58 78 78 78
37821 -101 98 89 124 112 88 142 110 46 156 107 11
37822 -163 110 8 167 114 7 175 118 6 180 123 7
37823 -185 133 11 197 138 11 210 150 10 219 162 10
37824 -226 170 11 236 178 12 236 178 12 234 174 13
37825 -219 162 10 197 138 11 163 110 8 130 83 6
37826 - 91 60 6 10 10 10 2 2 6 2 2 6
37827 - 18 18 18 38 38 38 38 38 38 38 38 38
37828 - 38 38 38 38 38 38 38 38 38 38 38 38
37829 - 38 38 38 38 38 38 26 26 26 2 2 6
37830 - 2 2 6 6 6 6 70 47 6 137 92 6
37831 -175 118 6 200 144 11 219 162 10 230 174 11
37832 -234 174 13 230 174 11 219 162 10 210 150 10
37833 -192 133 9 163 110 8 124 112 88 82 82 82
37834 - 50 50 50 30 30 30 14 14 14 6 6 6
37835 - 0 0 0 0 0 0 0 0 0 0 0 0
37836 - 0 0 0 0 0 0 0 0 0 0 0 0
37837 - 0 0 0 0 0 0 0 0 0 0 0 0
37838 - 0 0 0 0 0 0 0 0 0 0 0 0
37839 - 0 0 0 0 0 0 0 0 0 0 0 0
37840 - 6 6 6 14 14 14 22 22 22 34 34 34
37841 - 42 42 42 58 58 58 74 74 74 86 86 86
37842 -101 98 89 122 102 70 130 98 46 121 87 25
37843 -137 92 6 152 99 6 163 110 8 180 123 7
37844 -185 133 11 197 138 11 206 145 10 200 144 11
37845 -180 123 7 156 107 11 130 83 6 104 69 6
37846 - 50 34 6 54 54 54 110 110 110 101 98 89
37847 - 86 86 86 82 82 82 78 78 78 78 78 78
37848 - 78 78 78 78 78 78 78 78 78 78 78 78
37849 - 78 78 78 82 82 82 86 86 86 94 94 94
37850 -106 106 106 101 101 101 86 66 34 124 80 6
37851 -156 107 11 180 123 7 192 133 9 200 144 11
37852 -206 145 10 200 144 11 192 133 9 175 118 6
37853 -139 102 15 109 106 95 70 70 70 42 42 42
37854 - 22 22 22 10 10 10 0 0 0 0 0 0
37855 - 0 0 0 0 0 0 0 0 0 0 0 0
37856 - 0 0 0 0 0 0 0 0 0 0 0 0
37857 - 0 0 0 0 0 0 0 0 0 0 0 0
37858 - 0 0 0 0 0 0 0 0 0 0 0 0
37859 - 0 0 0 0 0 0 0 0 0 0 0 0
37860 - 0 0 0 0 0 0 6 6 6 10 10 10
37861 - 14 14 14 22 22 22 30 30 30 38 38 38
37862 - 50 50 50 62 62 62 74 74 74 90 90 90
37863 -101 98 89 112 100 78 121 87 25 124 80 6
37864 -137 92 6 152 99 6 152 99 6 152 99 6
37865 -138 86 6 124 80 6 98 70 6 86 66 30
37866 -101 98 89 82 82 82 58 58 58 46 46 46
37867 - 38 38 38 34 34 34 34 34 34 34 34 34
37868 - 34 34 34 34 34 34 34 34 34 34 34 34
37869 - 34 34 34 34 34 34 38 38 38 42 42 42
37870 - 54 54 54 82 82 82 94 86 76 91 60 6
37871 -134 86 6 156 107 11 167 114 7 175 118 6
37872 -175 118 6 167 114 7 152 99 6 121 87 25
37873 -101 98 89 62 62 62 34 34 34 18 18 18
37874 - 6 6 6 0 0 0 0 0 0 0 0 0
37875 - 0 0 0 0 0 0 0 0 0 0 0 0
37876 - 0 0 0 0 0 0 0 0 0 0 0 0
37877 - 0 0 0 0 0 0 0 0 0 0 0 0
37878 - 0 0 0 0 0 0 0 0 0 0 0 0
37879 - 0 0 0 0 0 0 0 0 0 0 0 0
37880 - 0 0 0 0 0 0 0 0 0 0 0 0
37881 - 0 0 0 6 6 6 6 6 6 10 10 10
37882 - 18 18 18 22 22 22 30 30 30 42 42 42
37883 - 50 50 50 66 66 66 86 86 86 101 98 89
37884 -106 86 58 98 70 6 104 69 6 104 69 6
37885 -104 69 6 91 60 6 82 62 34 90 90 90
37886 - 62 62 62 38 38 38 22 22 22 14 14 14
37887 - 10 10 10 10 10 10 10 10 10 10 10 10
37888 - 10 10 10 10 10 10 6 6 6 10 10 10
37889 - 10 10 10 10 10 10 10 10 10 14 14 14
37890 - 22 22 22 42 42 42 70 70 70 89 81 66
37891 - 80 54 7 104 69 6 124 80 6 137 92 6
37892 -134 86 6 116 81 8 100 82 52 86 86 86
37893 - 58 58 58 30 30 30 14 14 14 6 6 6
37894 - 0 0 0 0 0 0 0 0 0 0 0 0
37895 - 0 0 0 0 0 0 0 0 0 0 0 0
37896 - 0 0 0 0 0 0 0 0 0 0 0 0
37897 - 0 0 0 0 0 0 0 0 0 0 0 0
37898 - 0 0 0 0 0 0 0 0 0 0 0 0
37899 - 0 0 0 0 0 0 0 0 0 0 0 0
37900 - 0 0 0 0 0 0 0 0 0 0 0 0
37901 - 0 0 0 0 0 0 0 0 0 0 0 0
37902 - 0 0 0 6 6 6 10 10 10 14 14 14
37903 - 18 18 18 26 26 26 38 38 38 54 54 54
37904 - 70 70 70 86 86 86 94 86 76 89 81 66
37905 - 89 81 66 86 86 86 74 74 74 50 50 50
37906 - 30 30 30 14 14 14 6 6 6 0 0 0
37907 - 0 0 0 0 0 0 0 0 0 0 0 0
37908 - 0 0 0 0 0 0 0 0 0 0 0 0
37909 - 0 0 0 0 0 0 0 0 0 0 0 0
37910 - 6 6 6 18 18 18 34 34 34 58 58 58
37911 - 82 82 82 89 81 66 89 81 66 89 81 66
37912 - 94 86 66 94 86 76 74 74 74 50 50 50
37913 - 26 26 26 14 14 14 6 6 6 0 0 0
37914 - 0 0 0 0 0 0 0 0 0 0 0 0
37915 - 0 0 0 0 0 0 0 0 0 0 0 0
37916 - 0 0 0 0 0 0 0 0 0 0 0 0
37917 - 0 0 0 0 0 0 0 0 0 0 0 0
37918 - 0 0 0 0 0 0 0 0 0 0 0 0
37919 - 0 0 0 0 0 0 0 0 0 0 0 0
37920 - 0 0 0 0 0 0 0 0 0 0 0 0
37921 - 0 0 0 0 0 0 0 0 0 0 0 0
37922 - 0 0 0 0 0 0 0 0 0 0 0 0
37923 - 6 6 6 6 6 6 14 14 14 18 18 18
37924 - 30 30 30 38 38 38 46 46 46 54 54 54
37925 - 50 50 50 42 42 42 30 30 30 18 18 18
37926 - 10 10 10 0 0 0 0 0 0 0 0 0
37927 - 0 0 0 0 0 0 0 0 0 0 0 0
37928 - 0 0 0 0 0 0 0 0 0 0 0 0
37929 - 0 0 0 0 0 0 0 0 0 0 0 0
37930 - 0 0 0 6 6 6 14 14 14 26 26 26
37931 - 38 38 38 50 50 50 58 58 58 58 58 58
37932 - 54 54 54 42 42 42 30 30 30 18 18 18
37933 - 10 10 10 0 0 0 0 0 0 0 0 0
37934 - 0 0 0 0 0 0 0 0 0 0 0 0
37935 - 0 0 0 0 0 0 0 0 0 0 0 0
37936 - 0 0 0 0 0 0 0 0 0 0 0 0
37937 - 0 0 0 0 0 0 0 0 0 0 0 0
37938 - 0 0 0 0 0 0 0 0 0 0 0 0
37939 - 0 0 0 0 0 0 0 0 0 0 0 0
37940 - 0 0 0 0 0 0 0 0 0 0 0 0
37941 - 0 0 0 0 0 0 0 0 0 0 0 0
37942 - 0 0 0 0 0 0 0 0 0 0 0 0
37943 - 0 0 0 0 0 0 0 0 0 6 6 6
37944 - 6 6 6 10 10 10 14 14 14 18 18 18
37945 - 18 18 18 14 14 14 10 10 10 6 6 6
37946 - 0 0 0 0 0 0 0 0 0 0 0 0
37947 - 0 0 0 0 0 0 0 0 0 0 0 0
37948 - 0 0 0 0 0 0 0 0 0 0 0 0
37949 - 0 0 0 0 0 0 0 0 0 0 0 0
37950 - 0 0 0 0 0 0 0 0 0 6 6 6
37951 - 14 14 14 18 18 18 22 22 22 22 22 22
37952 - 18 18 18 14 14 14 10 10 10 6 6 6
37953 - 0 0 0 0 0 0 0 0 0 0 0 0
37954 - 0 0 0 0 0 0 0 0 0 0 0 0
37955 - 0 0 0 0 0 0 0 0 0 0 0 0
37956 - 0 0 0 0 0 0 0 0 0 0 0 0
37957 - 0 0 0 0 0 0 0 0 0 0 0 0
37958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37971 +4 4 4 4 4 4
37972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37985 +4 4 4 4 4 4
37986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37999 +4 4 4 4 4 4
38000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38013 +4 4 4 4 4 4
38014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38027 +4 4 4 4 4 4
38028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38041 +4 4 4 4 4 4
38042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38047 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38051 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38052 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38053 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055 +4 4 4 4 4 4
38056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38061 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38062 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38065 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38066 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38067 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38068 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069 +4 4 4 4 4 4
38070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38075 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38076 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38079 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38080 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38081 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38082 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38083 +4 4 4 4 4 4
38084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38088 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38089 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38090 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38093 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38094 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38095 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38096 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38097 +4 4 4 4 4 4
38098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38102 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38103 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38104 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38105 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38106 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38107 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38108 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38109 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38110 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38111 +4 4 4 4 4 4
38112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38115 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38116 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38117 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38118 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38119 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38120 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38121 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38122 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38123 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38124 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38125 +4 4 4 4 4 4
38126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38129 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38130 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38131 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38132 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38133 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38134 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38135 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38136 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38137 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38138 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38139 +4 4 4 4 4 4
38140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38142 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38143 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38144 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38145 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38146 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38147 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38148 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38149 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38150 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38151 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38152 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38153 +4 4 4 4 4 4
38154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38157 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38158 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38159 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38160 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38161 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38162 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38163 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38164 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38165 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38166 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38167 +4 4 4 4 4 4
38168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38171 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38172 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38173 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38174 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38175 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38176 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38177 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38178 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38179 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38180 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38181 +4 4 4 4 4 4
38182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38183 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38184 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38185 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38186 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38187 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38188 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38189 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38190 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38191 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38192 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38193 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38194 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38195 +4 4 4 4 4 4
38196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38197 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38198 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38199 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38200 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38201 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38202 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38203 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38204 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38205 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38206 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38207 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38208 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38209 +0 0 0 4 4 4
38210 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38211 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38212 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38213 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38214 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38215 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38216 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38217 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38218 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38219 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38220 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38221 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38222 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38223 +2 0 0 0 0 0
38224 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38225 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38226 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38227 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38228 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38229 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38230 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38231 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38232 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38233 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38234 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38235 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38236 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38237 +37 38 37 0 0 0
38238 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38239 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38240 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38241 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38242 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38243 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38244 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38245 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38246 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38247 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38248 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38249 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38250 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38251 +85 115 134 4 0 0
38252 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38253 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38254 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38255 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38256 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38257 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38258 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38259 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38260 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38261 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38262 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38263 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38264 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38265 +60 73 81 4 0 0
38266 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38267 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38268 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38269 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38270 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38271 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38272 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38273 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38274 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38275 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38276 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38277 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38278 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38279 +16 19 21 4 0 0
38280 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38281 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38282 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38283 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38284 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38285 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38286 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38287 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38288 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38289 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38290 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38291 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38292 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38293 +4 0 0 4 3 3
38294 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38295 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38296 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38298 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38299 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38300 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38301 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38302 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38303 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38304 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38305 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38306 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38307 +3 2 2 4 4 4
38308 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38309 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38310 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38311 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38312 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38313 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38314 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38315 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38316 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38317 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38318 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38319 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38320 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38321 +4 4 4 4 4 4
38322 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38323 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38324 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38325 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38326 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38327 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38328 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38329 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38330 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38331 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38332 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38333 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38334 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38335 +4 4 4 4 4 4
38336 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38337 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38338 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38339 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38340 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38341 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38342 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38343 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38344 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38345 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38346 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38347 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38348 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38349 +5 5 5 5 5 5
38350 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38351 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38352 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38353 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38354 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38355 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38356 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38357 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38358 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38359 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38360 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38361 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38362 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38363 +5 5 5 4 4 4
38364 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38365 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38366 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38367 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38368 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38369 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38370 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38371 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38372 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38373 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38374 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38375 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38377 +4 4 4 4 4 4
38378 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38379 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38380 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38381 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38382 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38383 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38384 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38385 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38386 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38387 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38388 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38389 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38391 +4 4 4 4 4 4
38392 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38393 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38394 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38395 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38396 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38397 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38398 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38399 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38400 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38401 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38402 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38405 +4 4 4 4 4 4
38406 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38407 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38408 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38409 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38410 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38411 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38412 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38413 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38414 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38415 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38416 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38419 +4 4 4 4 4 4
38420 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38421 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38422 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38423 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38424 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38425 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38426 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38427 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38428 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38429 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38430 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38433 +4 4 4 4 4 4
38434 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38435 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38436 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38437 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38438 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38439 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38440 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38441 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38442 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38443 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38444 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38447 +4 4 4 4 4 4
38448 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38449 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38450 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38451 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38452 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38453 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38454 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38455 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38456 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38457 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38458 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38461 +4 4 4 4 4 4
38462 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38463 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38464 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38465 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38466 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38467 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38468 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38469 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38470 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38471 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38472 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38475 +4 4 4 4 4 4
38476 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38477 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38478 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38479 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38480 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38481 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38482 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38483 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38484 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38485 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38486 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38489 +4 4 4 4 4 4
38490 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38491 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38492 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38493 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38494 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38495 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38496 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38497 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38498 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38499 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38500 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38503 +4 4 4 4 4 4
38504 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38505 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38506 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38507 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38508 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38509 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38510 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38511 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38512 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38513 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38514 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38517 +4 4 4 4 4 4
38518 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38519 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38520 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38521 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38522 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38523 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38524 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38525 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38526 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38527 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38528 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38531 +4 4 4 4 4 4
38532 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38533 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38534 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38535 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38536 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38537 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38538 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38539 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38540 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38541 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38542 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38545 +4 4 4 4 4 4
38546 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38547 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38548 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38549 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38550 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38551 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38552 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38553 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38554 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38555 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38556 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38559 +4 4 4 4 4 4
38560 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38561 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38562 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38563 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38564 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38565 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38566 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38567 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38568 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38569 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38570 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38573 +4 4 4 4 4 4
38574 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38575 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38576 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38577 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38578 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38579 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38580 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38581 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38582 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38583 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38584 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38587 +4 4 4 4 4 4
38588 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38589 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38590 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38591 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38592 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38593 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38594 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38595 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38596 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38597 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38598 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38601 +4 4 4 4 4 4
38602 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38603 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38604 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38605 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38606 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38607 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38608 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38609 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38610 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38611 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38612 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38615 +4 4 4 4 4 4
38616 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38617 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38618 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38619 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38620 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38621 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38622 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38623 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38624 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38625 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38626 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38629 +4 4 4 4 4 4
38630 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38631 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38632 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38633 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38634 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38635 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38636 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38637 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38638 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38639 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38640 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38643 +4 4 4 4 4 4
38644 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38645 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38646 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38647 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38648 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38649 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38650 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38651 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38652 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38653 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38654 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38657 +4 4 4 4 4 4
38658 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38659 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38660 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38661 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38662 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38663 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38664 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38665 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38666 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38667 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38668 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38671 +4 4 4 4 4 4
38672 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38673 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38674 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38675 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38676 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38677 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38678 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38679 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38680 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38681 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38682 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38685 +4 4 4 4 4 4
38686 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38687 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38688 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38689 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38690 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38691 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38692 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38693 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38694 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38695 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38696 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38699 +4 4 4 4 4 4
38700 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38701 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38702 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38703 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38704 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38705 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38706 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38707 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38708 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38709 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38710 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38713 +4 4 4 4 4 4
38714 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38715 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38716 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38717 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38718 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38719 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38720 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38721 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38722 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38723 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38724 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38727 +4 4 4 4 4 4
38728 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38729 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38730 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38731 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38732 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38733 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38734 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38735 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38736 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38737 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38738 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38741 +4 4 4 4 4 4
38742 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38743 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38744 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38745 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38746 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38747 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38748 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38749 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38750 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38751 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38752 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38755 +4 4 4 4 4 4
38756 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38757 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38758 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38759 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38760 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38761 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38762 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38763 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38764 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38765 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38766 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38769 +4 4 4 4 4 4
38770 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38771 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38772 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38773 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38774 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38775 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38776 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38777 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38778 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38779 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38783 +4 4 4 4 4 4
38784 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38785 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38786 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38787 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38788 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38789 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38790 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38791 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38792 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38793 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38797 +4 4 4 4 4 4
38798 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38799 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38800 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38801 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38802 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38803 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38804 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38805 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38806 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38807 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38811 +4 4 4 4 4 4
38812 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38813 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38814 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38815 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38816 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38817 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38818 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38819 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38820 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38821 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38825 +4 4 4 4 4 4
38826 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38827 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38828 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38829 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38830 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38831 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38832 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38833 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38834 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38839 +4 4 4 4 4 4
38840 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38841 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38842 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38843 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38844 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38845 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38846 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38847 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38848 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38853 +4 4 4 4 4 4
38854 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38855 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38856 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38857 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38858 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38859 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38860 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38861 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38862 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38867 +4 4 4 4 4 4
38868 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38869 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38870 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38871 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38872 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38873 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38874 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38875 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38881 +4 4 4 4 4 4
38882 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38883 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38884 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38885 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38886 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38887 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38888 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38889 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38895 +4 4 4 4 4 4
38896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38897 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38898 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38899 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38900 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38901 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38902 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38903 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38909 +4 4 4 4 4 4
38910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38911 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38912 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38913 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38914 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38915 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38916 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38917 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38923 +4 4 4 4 4 4
38924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38925 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38926 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38927 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38928 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38929 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38930 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38931 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38937 +4 4 4 4 4 4
38938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38940 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38941 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38942 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38943 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38944 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38945 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38951 +4 4 4 4 4 4
38952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38955 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38956 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
38957 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
38958 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
38959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38965 +4 4 4 4 4 4
38966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38969 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38970 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38971 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
38972 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
38973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38979 +4 4 4 4 4 4
38980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38983 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38984 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38985 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38986 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
38987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38993 +4 4 4 4 4 4
38994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38997 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
38998 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
38999 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39000 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39007 +4 4 4 4 4 4
39008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39012 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39013 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39014 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39021 +4 4 4 4 4 4
39022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39027 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39028 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39035 +4 4 4 4 4 4
39036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39041 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39042 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39049 +4 4 4 4 4 4
39050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39055 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063 +4 4 4 4 4 4
39064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39069 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077 +4 4 4 4 4 4
39078 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39079 index 3473e75..c930142 100644
39080 --- a/drivers/video/udlfb.c
39081 +++ b/drivers/video/udlfb.c
39082 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39083 dlfb_urb_completion(urb);
39084
39085 error:
39086 - atomic_add(bytes_sent, &dev->bytes_sent);
39087 - atomic_add(bytes_identical, &dev->bytes_identical);
39088 - atomic_add(width*height*2, &dev->bytes_rendered);
39089 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39090 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39091 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39092 end_cycles = get_cycles();
39093 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39094 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39095 >> 10)), /* Kcycles */
39096 &dev->cpu_kcycles_used);
39097
39098 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39099 dlfb_urb_completion(urb);
39100
39101 error:
39102 - atomic_add(bytes_sent, &dev->bytes_sent);
39103 - atomic_add(bytes_identical, &dev->bytes_identical);
39104 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39105 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39106 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39107 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39108 end_cycles = get_cycles();
39109 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39110 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39111 >> 10)), /* Kcycles */
39112 &dev->cpu_kcycles_used);
39113 }
39114 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39115 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39116 struct dlfb_data *dev = fb_info->par;
39117 return snprintf(buf, PAGE_SIZE, "%u\n",
39118 - atomic_read(&dev->bytes_rendered));
39119 + atomic_read_unchecked(&dev->bytes_rendered));
39120 }
39121
39122 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39123 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39124 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39125 struct dlfb_data *dev = fb_info->par;
39126 return snprintf(buf, PAGE_SIZE, "%u\n",
39127 - atomic_read(&dev->bytes_identical));
39128 + atomic_read_unchecked(&dev->bytes_identical));
39129 }
39130
39131 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39132 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39133 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39134 struct dlfb_data *dev = fb_info->par;
39135 return snprintf(buf, PAGE_SIZE, "%u\n",
39136 - atomic_read(&dev->bytes_sent));
39137 + atomic_read_unchecked(&dev->bytes_sent));
39138 }
39139
39140 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39141 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39142 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39143 struct dlfb_data *dev = fb_info->par;
39144 return snprintf(buf, PAGE_SIZE, "%u\n",
39145 - atomic_read(&dev->cpu_kcycles_used));
39146 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39147 }
39148
39149 static ssize_t edid_show(
39150 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39151 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39152 struct dlfb_data *dev = fb_info->par;
39153
39154 - atomic_set(&dev->bytes_rendered, 0);
39155 - atomic_set(&dev->bytes_identical, 0);
39156 - atomic_set(&dev->bytes_sent, 0);
39157 - atomic_set(&dev->cpu_kcycles_used, 0);
39158 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39159 + atomic_set_unchecked(&dev->bytes_identical, 0);
39160 + atomic_set_unchecked(&dev->bytes_sent, 0);
39161 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39162
39163 return count;
39164 }
39165 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39166 index 7f8472c..9842e87 100644
39167 --- a/drivers/video/uvesafb.c
39168 +++ b/drivers/video/uvesafb.c
39169 @@ -19,6 +19,7 @@
39170 #include <linux/io.h>
39171 #include <linux/mutex.h>
39172 #include <linux/slab.h>
39173 +#include <linux/moduleloader.h>
39174 #include <video/edid.h>
39175 #include <video/uvesafb.h>
39176 #ifdef CONFIG_X86
39177 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39178 NULL,
39179 };
39180
39181 - return call_usermodehelper(v86d_path, argv, envp, 1);
39182 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39183 }
39184
39185 /*
39186 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39187 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39188 par->pmi_setpal = par->ypan = 0;
39189 } else {
39190 +
39191 +#ifdef CONFIG_PAX_KERNEXEC
39192 +#ifdef CONFIG_MODULES
39193 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39194 +#endif
39195 + if (!par->pmi_code) {
39196 + par->pmi_setpal = par->ypan = 0;
39197 + return 0;
39198 + }
39199 +#endif
39200 +
39201 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39202 + task->t.regs.edi);
39203 +
39204 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39205 + pax_open_kernel();
39206 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39207 + pax_close_kernel();
39208 +
39209 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39210 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39211 +#else
39212 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39213 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39214 +#endif
39215 +
39216 printk(KERN_INFO "uvesafb: protected mode interface info at "
39217 "%04x:%04x\n",
39218 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39219 @@ -1821,6 +1844,11 @@ out:
39220 if (par->vbe_modes)
39221 kfree(par->vbe_modes);
39222
39223 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39224 + if (par->pmi_code)
39225 + module_free_exec(NULL, par->pmi_code);
39226 +#endif
39227 +
39228 framebuffer_release(info);
39229 return err;
39230 }
39231 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39232 kfree(par->vbe_state_orig);
39233 if (par->vbe_state_saved)
39234 kfree(par->vbe_state_saved);
39235 +
39236 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39237 + if (par->pmi_code)
39238 + module_free_exec(NULL, par->pmi_code);
39239 +#endif
39240 +
39241 }
39242
39243 framebuffer_release(info);
39244 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39245 index 501b340..86bd4cf 100644
39246 --- a/drivers/video/vesafb.c
39247 +++ b/drivers/video/vesafb.c
39248 @@ -9,6 +9,7 @@
39249 */
39250
39251 #include <linux/module.h>
39252 +#include <linux/moduleloader.h>
39253 #include <linux/kernel.h>
39254 #include <linux/errno.h>
39255 #include <linux/string.h>
39256 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39257 static int vram_total __initdata; /* Set total amount of memory */
39258 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39259 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39260 -static void (*pmi_start)(void) __read_mostly;
39261 -static void (*pmi_pal) (void) __read_mostly;
39262 +static void (*pmi_start)(void) __read_only;
39263 +static void (*pmi_pal) (void) __read_only;
39264 static int depth __read_mostly;
39265 static int vga_compat __read_mostly;
39266 /* --------------------------------------------------------------------- */
39267 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39268 unsigned int size_vmode;
39269 unsigned int size_remap;
39270 unsigned int size_total;
39271 + void *pmi_code = NULL;
39272
39273 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39274 return -ENODEV;
39275 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39276 size_remap = size_total;
39277 vesafb_fix.smem_len = size_remap;
39278
39279 -#ifndef __i386__
39280 - screen_info.vesapm_seg = 0;
39281 -#endif
39282 -
39283 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39284 printk(KERN_WARNING
39285 "vesafb: cannot reserve video memory at 0x%lx\n",
39286 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39287 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39288 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39289
39290 +#ifdef __i386__
39291 +
39292 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39293 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39294 + if (!pmi_code)
39295 +#elif !defined(CONFIG_PAX_KERNEXEC)
39296 + if (0)
39297 +#endif
39298 +
39299 +#endif
39300 + screen_info.vesapm_seg = 0;
39301 +
39302 if (screen_info.vesapm_seg) {
39303 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39304 - screen_info.vesapm_seg,screen_info.vesapm_off);
39305 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39306 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39307 }
39308
39309 if (screen_info.vesapm_seg < 0xc000)
39310 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39311
39312 if (ypan || pmi_setpal) {
39313 unsigned short *pmi_base;
39314 +
39315 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39316 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39317 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39318 +
39319 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39320 + pax_open_kernel();
39321 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39322 +#else
39323 + pmi_code = pmi_base;
39324 +#endif
39325 +
39326 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39327 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39328 +
39329 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39330 + pmi_start = ktva_ktla(pmi_start);
39331 + pmi_pal = ktva_ktla(pmi_pal);
39332 + pax_close_kernel();
39333 +#endif
39334 +
39335 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39336 if (pmi_base[3]) {
39337 printk(KERN_INFO "vesafb: pmi: ports = ");
39338 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39339 info->node, info->fix.id);
39340 return 0;
39341 err:
39342 +
39343 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39344 + module_free_exec(NULL, pmi_code);
39345 +#endif
39346 +
39347 if (info->screen_base)
39348 iounmap(info->screen_base);
39349 framebuffer_release(info);
39350 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39351 index 88714ae..16c2e11 100644
39352 --- a/drivers/video/via/via_clock.h
39353 +++ b/drivers/video/via/via_clock.h
39354 @@ -56,7 +56,7 @@ struct via_clock {
39355
39356 void (*set_engine_pll_state)(u8 state);
39357 void (*set_engine_pll)(struct via_pll_config config);
39358 -};
39359 +} __no_const;
39360
39361
39362 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39363 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39364 index e56c934..fc22f4b 100644
39365 --- a/drivers/xen/xen-pciback/conf_space.h
39366 +++ b/drivers/xen/xen-pciback/conf_space.h
39367 @@ -44,15 +44,15 @@ struct config_field {
39368 struct {
39369 conf_dword_write write;
39370 conf_dword_read read;
39371 - } dw;
39372 + } __no_const dw;
39373 struct {
39374 conf_word_write write;
39375 conf_word_read read;
39376 - } w;
39377 + } __no_const w;
39378 struct {
39379 conf_byte_write write;
39380 conf_byte_read read;
39381 - } b;
39382 + } __no_const b;
39383 } u;
39384 struct list_head list;
39385 };
39386 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39387 index 879ed88..bc03a01 100644
39388 --- a/fs/9p/vfs_inode.c
39389 +++ b/fs/9p/vfs_inode.c
39390 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39391 void
39392 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39393 {
39394 - char *s = nd_get_link(nd);
39395 + const char *s = nd_get_link(nd);
39396
39397 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39398 IS_ERR(s) ? "<error>" : s);
39399 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39400 index 79e2ca7..5828ad1 100644
39401 --- a/fs/Kconfig.binfmt
39402 +++ b/fs/Kconfig.binfmt
39403 @@ -86,7 +86,7 @@ config HAVE_AOUT
39404
39405 config BINFMT_AOUT
39406 tristate "Kernel support for a.out and ECOFF binaries"
39407 - depends on HAVE_AOUT
39408 + depends on HAVE_AOUT && BROKEN
39409 ---help---
39410 A.out (Assembler.OUTput) is a set of formats for libraries and
39411 executables used in the earliest versions of UNIX. Linux used
39412 diff --git a/fs/aio.c b/fs/aio.c
39413 index 969beb0..09fab51 100644
39414 --- a/fs/aio.c
39415 +++ b/fs/aio.c
39416 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39417 size += sizeof(struct io_event) * nr_events;
39418 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39419
39420 - if (nr_pages < 0)
39421 + if (nr_pages <= 0)
39422 return -EINVAL;
39423
39424 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39425 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39426 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39427 {
39428 ssize_t ret;
39429 + struct iovec iovstack;
39430
39431 #ifdef CONFIG_COMPAT
39432 if (compat)
39433 ret = compat_rw_copy_check_uvector(type,
39434 (struct compat_iovec __user *)kiocb->ki_buf,
39435 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39436 + kiocb->ki_nbytes, 1, &iovstack,
39437 &kiocb->ki_iovec, 1);
39438 else
39439 #endif
39440 ret = rw_copy_check_uvector(type,
39441 (struct iovec __user *)kiocb->ki_buf,
39442 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39443 + kiocb->ki_nbytes, 1, &iovstack,
39444 &kiocb->ki_iovec, 1);
39445 if (ret < 0)
39446 goto out;
39447
39448 + if (kiocb->ki_iovec == &iovstack) {
39449 + kiocb->ki_inline_vec = iovstack;
39450 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39451 + }
39452 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39453 kiocb->ki_cur_seg = 0;
39454 /* ki_nbytes/left now reflect bytes instead of segs */
39455 diff --git a/fs/attr.c b/fs/attr.c
39456 index 7ee7ba4..0c61a60 100644
39457 --- a/fs/attr.c
39458 +++ b/fs/attr.c
39459 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39460 unsigned long limit;
39461
39462 limit = rlimit(RLIMIT_FSIZE);
39463 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39464 if (limit != RLIM_INFINITY && offset > limit)
39465 goto out_sig;
39466 if (offset > inode->i_sb->s_maxbytes)
39467 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39468 index e1fbdee..cd5ea56 100644
39469 --- a/fs/autofs4/waitq.c
39470 +++ b/fs/autofs4/waitq.c
39471 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39472 {
39473 unsigned long sigpipe, flags;
39474 mm_segment_t fs;
39475 - const char *data = (const char *)addr;
39476 + const char __user *data = (const char __force_user *)addr;
39477 ssize_t wr = 0;
39478
39479 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39480 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39481 index 8342ca6..82fd192 100644
39482 --- a/fs/befs/linuxvfs.c
39483 +++ b/fs/befs/linuxvfs.c
39484 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39485 {
39486 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39487 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39488 - char *link = nd_get_link(nd);
39489 + const char *link = nd_get_link(nd);
39490 if (!IS_ERR(link))
39491 kfree(link);
39492 }
39493 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39494 index a6395bd..a5b24c4 100644
39495 --- a/fs/binfmt_aout.c
39496 +++ b/fs/binfmt_aout.c
39497 @@ -16,6 +16,7 @@
39498 #include <linux/string.h>
39499 #include <linux/fs.h>
39500 #include <linux/file.h>
39501 +#include <linux/security.h>
39502 #include <linux/stat.h>
39503 #include <linux/fcntl.h>
39504 #include <linux/ptrace.h>
39505 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39506 #endif
39507 # define START_STACK(u) ((void __user *)u.start_stack)
39508
39509 + memset(&dump, 0, sizeof(dump));
39510 +
39511 fs = get_fs();
39512 set_fs(KERNEL_DS);
39513 has_dumped = 1;
39514 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39515
39516 /* If the size of the dump file exceeds the rlimit, then see what would happen
39517 if we wrote the stack, but not the data area. */
39518 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39519 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39520 dump.u_dsize = 0;
39521
39522 /* Make sure we have enough room to write the stack and data areas. */
39523 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39524 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39525 dump.u_ssize = 0;
39526
39527 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39528 rlim = rlimit(RLIMIT_DATA);
39529 if (rlim >= RLIM_INFINITY)
39530 rlim = ~0;
39531 +
39532 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39533 if (ex.a_data + ex.a_bss > rlim)
39534 return -ENOMEM;
39535
39536 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39537 install_exec_creds(bprm);
39538 current->flags &= ~PF_FORKNOEXEC;
39539
39540 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39541 + current->mm->pax_flags = 0UL;
39542 +#endif
39543 +
39544 +#ifdef CONFIG_PAX_PAGEEXEC
39545 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39546 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39547 +
39548 +#ifdef CONFIG_PAX_EMUTRAMP
39549 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39550 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39551 +#endif
39552 +
39553 +#ifdef CONFIG_PAX_MPROTECT
39554 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39555 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39556 +#endif
39557 +
39558 + }
39559 +#endif
39560 +
39561 if (N_MAGIC(ex) == OMAGIC) {
39562 unsigned long text_addr, map_size;
39563 loff_t pos;
39564 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39565
39566 down_write(&current->mm->mmap_sem);
39567 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39568 - PROT_READ | PROT_WRITE | PROT_EXEC,
39569 + PROT_READ | PROT_WRITE,
39570 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39571 fd_offset + ex.a_text);
39572 up_write(&current->mm->mmap_sem);
39573 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39574 index 21ac5ee..c1090ea 100644
39575 --- a/fs/binfmt_elf.c
39576 +++ b/fs/binfmt_elf.c
39577 @@ -32,6 +32,7 @@
39578 #include <linux/elf.h>
39579 #include <linux/utsname.h>
39580 #include <linux/coredump.h>
39581 +#include <linux/xattr.h>
39582 #include <asm/uaccess.h>
39583 #include <asm/param.h>
39584 #include <asm/page.h>
39585 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39586 #define elf_core_dump NULL
39587 #endif
39588
39589 +#ifdef CONFIG_PAX_MPROTECT
39590 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39591 +#endif
39592 +
39593 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39594 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39595 #else
39596 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39597 .load_binary = load_elf_binary,
39598 .load_shlib = load_elf_library,
39599 .core_dump = elf_core_dump,
39600 +
39601 +#ifdef CONFIG_PAX_MPROTECT
39602 + .handle_mprotect= elf_handle_mprotect,
39603 +#endif
39604 +
39605 .min_coredump = ELF_EXEC_PAGESIZE,
39606 };
39607
39608 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39609
39610 static int set_brk(unsigned long start, unsigned long end)
39611 {
39612 + unsigned long e = end;
39613 +
39614 start = ELF_PAGEALIGN(start);
39615 end = ELF_PAGEALIGN(end);
39616 if (end > start) {
39617 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39618 if (BAD_ADDR(addr))
39619 return addr;
39620 }
39621 - current->mm->start_brk = current->mm->brk = end;
39622 + current->mm->start_brk = current->mm->brk = e;
39623 return 0;
39624 }
39625
39626 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39627 elf_addr_t __user *u_rand_bytes;
39628 const char *k_platform = ELF_PLATFORM;
39629 const char *k_base_platform = ELF_BASE_PLATFORM;
39630 - unsigned char k_rand_bytes[16];
39631 + u32 k_rand_bytes[4];
39632 int items;
39633 elf_addr_t *elf_info;
39634 int ei_index = 0;
39635 const struct cred *cred = current_cred();
39636 struct vm_area_struct *vma;
39637 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39638
39639 /*
39640 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39641 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39642 * Generate 16 random bytes for userspace PRNG seeding.
39643 */
39644 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39645 - u_rand_bytes = (elf_addr_t __user *)
39646 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39647 + srandom32(k_rand_bytes[0] ^ random32());
39648 + srandom32(k_rand_bytes[1] ^ random32());
39649 + srandom32(k_rand_bytes[2] ^ random32());
39650 + srandom32(k_rand_bytes[3] ^ random32());
39651 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39652 + u_rand_bytes = (elf_addr_t __user *) p;
39653 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39654 return -EFAULT;
39655
39656 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39657 return -EFAULT;
39658 current->mm->env_end = p;
39659
39660 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39661 +
39662 /* Put the elf_info on the stack in the right place. */
39663 sp = (elf_addr_t __user *)envp + 1;
39664 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39665 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39666 return -EFAULT;
39667 return 0;
39668 }
39669 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39670 {
39671 struct elf_phdr *elf_phdata;
39672 struct elf_phdr *eppnt;
39673 - unsigned long load_addr = 0;
39674 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39675 int load_addr_set = 0;
39676 unsigned long last_bss = 0, elf_bss = 0;
39677 - unsigned long error = ~0UL;
39678 + unsigned long error = -EINVAL;
39679 unsigned long total_size;
39680 int retval, i, size;
39681
39682 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39683 goto out_close;
39684 }
39685
39686 +#ifdef CONFIG_PAX_SEGMEXEC
39687 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39688 + pax_task_size = SEGMEXEC_TASK_SIZE;
39689 +#endif
39690 +
39691 eppnt = elf_phdata;
39692 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39693 if (eppnt->p_type == PT_LOAD) {
39694 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39695 k = load_addr + eppnt->p_vaddr;
39696 if (BAD_ADDR(k) ||
39697 eppnt->p_filesz > eppnt->p_memsz ||
39698 - eppnt->p_memsz > TASK_SIZE ||
39699 - TASK_SIZE - eppnt->p_memsz < k) {
39700 + eppnt->p_memsz > pax_task_size ||
39701 + pax_task_size - eppnt->p_memsz < k) {
39702 error = -ENOMEM;
39703 goto out_close;
39704 }
39705 @@ -528,6 +552,348 @@ out:
39706 return error;
39707 }
39708
39709 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39710 +{
39711 + unsigned long pax_flags = 0UL;
39712 +
39713 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39714 +
39715 +#ifdef CONFIG_PAX_PAGEEXEC
39716 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39717 + pax_flags |= MF_PAX_PAGEEXEC;
39718 +#endif
39719 +
39720 +#ifdef CONFIG_PAX_SEGMEXEC
39721 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39722 + pax_flags |= MF_PAX_SEGMEXEC;
39723 +#endif
39724 +
39725 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39726 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39727 + if ((__supported_pte_mask & _PAGE_NX))
39728 + pax_flags &= ~MF_PAX_SEGMEXEC;
39729 + else
39730 + pax_flags &= ~MF_PAX_PAGEEXEC;
39731 + }
39732 +#endif
39733 +
39734 +#ifdef CONFIG_PAX_EMUTRAMP
39735 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39736 + pax_flags |= MF_PAX_EMUTRAMP;
39737 +#endif
39738 +
39739 +#ifdef CONFIG_PAX_MPROTECT
39740 + if (elf_phdata->p_flags & PF_MPROTECT)
39741 + pax_flags |= MF_PAX_MPROTECT;
39742 +#endif
39743 +
39744 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39745 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39746 + pax_flags |= MF_PAX_RANDMMAP;
39747 +#endif
39748 +
39749 +#endif
39750 +
39751 + return pax_flags;
39752 +}
39753 +
39754 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39755 +{
39756 + unsigned long pax_flags = 0UL;
39757 +
39758 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39759 +
39760 +#ifdef CONFIG_PAX_PAGEEXEC
39761 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39762 + pax_flags |= MF_PAX_PAGEEXEC;
39763 +#endif
39764 +
39765 +#ifdef CONFIG_PAX_SEGMEXEC
39766 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39767 + pax_flags |= MF_PAX_SEGMEXEC;
39768 +#endif
39769 +
39770 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39771 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39772 + if ((__supported_pte_mask & _PAGE_NX))
39773 + pax_flags &= ~MF_PAX_SEGMEXEC;
39774 + else
39775 + pax_flags &= ~MF_PAX_PAGEEXEC;
39776 + }
39777 +#endif
39778 +
39779 +#ifdef CONFIG_PAX_EMUTRAMP
39780 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39781 + pax_flags |= MF_PAX_EMUTRAMP;
39782 +#endif
39783 +
39784 +#ifdef CONFIG_PAX_MPROTECT
39785 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39786 + pax_flags |= MF_PAX_MPROTECT;
39787 +#endif
39788 +
39789 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39790 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39791 + pax_flags |= MF_PAX_RANDMMAP;
39792 +#endif
39793 +
39794 +#endif
39795 +
39796 + return pax_flags;
39797 +}
39798 +
39799 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39800 +{
39801 + unsigned long pax_flags = 0UL;
39802 +
39803 +#ifdef CONFIG_PAX_EI_PAX
39804 +
39805 +#ifdef CONFIG_PAX_PAGEEXEC
39806 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39807 + pax_flags |= MF_PAX_PAGEEXEC;
39808 +#endif
39809 +
39810 +#ifdef CONFIG_PAX_SEGMEXEC
39811 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39812 + pax_flags |= MF_PAX_SEGMEXEC;
39813 +#endif
39814 +
39815 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39816 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39817 + if ((__supported_pte_mask & _PAGE_NX))
39818 + pax_flags &= ~MF_PAX_SEGMEXEC;
39819 + else
39820 + pax_flags &= ~MF_PAX_PAGEEXEC;
39821 + }
39822 +#endif
39823 +
39824 +#ifdef CONFIG_PAX_EMUTRAMP
39825 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39826 + pax_flags |= MF_PAX_EMUTRAMP;
39827 +#endif
39828 +
39829 +#ifdef CONFIG_PAX_MPROTECT
39830 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39831 + pax_flags |= MF_PAX_MPROTECT;
39832 +#endif
39833 +
39834 +#ifdef CONFIG_PAX_ASLR
39835 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39836 + pax_flags |= MF_PAX_RANDMMAP;
39837 +#endif
39838 +
39839 +#else
39840 +
39841 +#ifdef CONFIG_PAX_PAGEEXEC
39842 + pax_flags |= MF_PAX_PAGEEXEC;
39843 +#endif
39844 +
39845 +#ifdef CONFIG_PAX_MPROTECT
39846 + pax_flags |= MF_PAX_MPROTECT;
39847 +#endif
39848 +
39849 +#ifdef CONFIG_PAX_RANDMMAP
39850 + pax_flags |= MF_PAX_RANDMMAP;
39851 +#endif
39852 +
39853 +#ifdef CONFIG_PAX_SEGMEXEC
39854 + if (!(__supported_pte_mask & _PAGE_NX)) {
39855 + pax_flags &= ~MF_PAX_PAGEEXEC;
39856 + pax_flags |= MF_PAX_SEGMEXEC;
39857 + }
39858 +#endif
39859 +
39860 +#endif
39861 +
39862 + return pax_flags;
39863 +}
39864 +
39865 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39866 +{
39867 +
39868 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39869 + unsigned long i;
39870 +
39871 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39872 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39873 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39874 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39875 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39876 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39877 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39878 + return ~0UL;
39879 +
39880 +#ifdef CONFIG_PAX_SOFTMODE
39881 + if (pax_softmode)
39882 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39883 + else
39884 +#endif
39885 +
39886 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39887 + break;
39888 + }
39889 +#endif
39890 +
39891 + return ~0UL;
39892 +}
39893 +
39894 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39895 +{
39896 + unsigned long pax_flags = 0UL;
39897 +
39898 +#ifdef CONFIG_PAX_PAGEEXEC
39899 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39900 + pax_flags |= MF_PAX_PAGEEXEC;
39901 +#endif
39902 +
39903 +#ifdef CONFIG_PAX_SEGMEXEC
39904 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39905 + pax_flags |= MF_PAX_SEGMEXEC;
39906 +#endif
39907 +
39908 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39909 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39910 + if ((__supported_pte_mask & _PAGE_NX))
39911 + pax_flags &= ~MF_PAX_SEGMEXEC;
39912 + else
39913 + pax_flags &= ~MF_PAX_PAGEEXEC;
39914 + }
39915 +#endif
39916 +
39917 +#ifdef CONFIG_PAX_EMUTRAMP
39918 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
39919 + pax_flags |= MF_PAX_EMUTRAMP;
39920 +#endif
39921 +
39922 +#ifdef CONFIG_PAX_MPROTECT
39923 + if (pax_flags_softmode & MF_PAX_MPROTECT)
39924 + pax_flags |= MF_PAX_MPROTECT;
39925 +#endif
39926 +
39927 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39928 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
39929 + pax_flags |= MF_PAX_RANDMMAP;
39930 +#endif
39931 +
39932 + return pax_flags;
39933 +}
39934 +
39935 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
39936 +{
39937 + unsigned long pax_flags = 0UL;
39938 +
39939 +#ifdef CONFIG_PAX_PAGEEXEC
39940 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
39941 + pax_flags |= MF_PAX_PAGEEXEC;
39942 +#endif
39943 +
39944 +#ifdef CONFIG_PAX_SEGMEXEC
39945 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
39946 + pax_flags |= MF_PAX_SEGMEXEC;
39947 +#endif
39948 +
39949 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39950 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39951 + if ((__supported_pte_mask & _PAGE_NX))
39952 + pax_flags &= ~MF_PAX_SEGMEXEC;
39953 + else
39954 + pax_flags &= ~MF_PAX_PAGEEXEC;
39955 + }
39956 +#endif
39957 +
39958 +#ifdef CONFIG_PAX_EMUTRAMP
39959 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
39960 + pax_flags |= MF_PAX_EMUTRAMP;
39961 +#endif
39962 +
39963 +#ifdef CONFIG_PAX_MPROTECT
39964 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
39965 + pax_flags |= MF_PAX_MPROTECT;
39966 +#endif
39967 +
39968 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39969 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
39970 + pax_flags |= MF_PAX_RANDMMAP;
39971 +#endif
39972 +
39973 + return pax_flags;
39974 +}
39975 +
39976 +static unsigned long pax_parse_xattr_pax(struct file * const file)
39977 +{
39978 +
39979 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
39980 + ssize_t xattr_size, i;
39981 + unsigned char xattr_value[5];
39982 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
39983 +
39984 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
39985 + if (xattr_size <= 0)
39986 + return ~0UL;
39987 +
39988 + for (i = 0; i < xattr_size; i++)
39989 + switch (xattr_value[i]) {
39990 + default:
39991 + return ~0UL;
39992 +
39993 +#define parse_flag(option1, option2, flag) \
39994 + case option1: \
39995 + pax_flags_hardmode |= MF_PAX_##flag; \
39996 + break; \
39997 + case option2: \
39998 + pax_flags_softmode |= MF_PAX_##flag; \
39999 + break;
40000 +
40001 + parse_flag('p', 'P', PAGEEXEC);
40002 + parse_flag('e', 'E', EMUTRAMP);
40003 + parse_flag('m', 'M', MPROTECT);
40004 + parse_flag('r', 'R', RANDMMAP);
40005 + parse_flag('s', 'S', SEGMEXEC);
40006 +
40007 +#undef parse_flag
40008 + }
40009 +
40010 + if (pax_flags_hardmode & pax_flags_softmode)
40011 + return ~0UL;
40012 +
40013 +#ifdef CONFIG_PAX_SOFTMODE
40014 + if (pax_softmode)
40015 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40016 + else
40017 +#endif
40018 +
40019 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40020 +#else
40021 + return ~0UL;
40022 +#endif
40023 +}
40024 +
40025 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40026 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40027 +{
40028 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40029 +
40030 + pax_flags = pax_parse_ei_pax(elf_ex);
40031 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40032 + xattr_pax_flags = pax_parse_xattr_pax(file);
40033 +
40034 + if (pt_pax_flags == ~0UL)
40035 + pt_pax_flags = xattr_pax_flags;
40036 + else if (xattr_pax_flags == ~0UL)
40037 + xattr_pax_flags = pt_pax_flags;
40038 + if (pt_pax_flags != xattr_pax_flags)
40039 + return -EINVAL;
40040 + if (pt_pax_flags != ~0UL)
40041 + pax_flags = pt_pax_flags;
40042 +
40043 + if (0 > pax_check_flags(&pax_flags))
40044 + return -EINVAL;
40045 +
40046 + current->mm->pax_flags = pax_flags;
40047 + return 0;
40048 +}
40049 +#endif
40050 +
40051 /*
40052 * These are the functions used to load ELF style executables and shared
40053 * libraries. There is no binary dependent code anywhere else.
40054 @@ -544,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40055 {
40056 unsigned int random_variable = 0;
40057
40058 +#ifdef CONFIG_PAX_RANDUSTACK
40059 + if (randomize_va_space)
40060 + return stack_top - current->mm->delta_stack;
40061 +#endif
40062 +
40063 if ((current->flags & PF_RANDOMIZE) &&
40064 !(current->personality & ADDR_NO_RANDOMIZE)) {
40065 random_variable = get_random_int() & STACK_RND_MASK;
40066 @@ -562,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40067 unsigned long load_addr = 0, load_bias = 0;
40068 int load_addr_set = 0;
40069 char * elf_interpreter = NULL;
40070 - unsigned long error;
40071 + unsigned long error = 0;
40072 struct elf_phdr *elf_ppnt, *elf_phdata;
40073 unsigned long elf_bss, elf_brk;
40074 int retval, i;
40075 @@ -572,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40076 unsigned long start_code, end_code, start_data, end_data;
40077 unsigned long reloc_func_desc __maybe_unused = 0;
40078 int executable_stack = EXSTACK_DEFAULT;
40079 - unsigned long def_flags = 0;
40080 struct {
40081 struct elfhdr elf_ex;
40082 struct elfhdr interp_elf_ex;
40083 } *loc;
40084 + unsigned long pax_task_size = TASK_SIZE;
40085
40086 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40087 if (!loc) {
40088 @@ -713,11 +1084,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40089
40090 /* OK, This is the point of no return */
40091 current->flags &= ~PF_FORKNOEXEC;
40092 - current->mm->def_flags = def_flags;
40093 +
40094 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40095 + current->mm->pax_flags = 0UL;
40096 +#endif
40097 +
40098 +#ifdef CONFIG_PAX_DLRESOLVE
40099 + current->mm->call_dl_resolve = 0UL;
40100 +#endif
40101 +
40102 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40103 + current->mm->call_syscall = 0UL;
40104 +#endif
40105 +
40106 +#ifdef CONFIG_PAX_ASLR
40107 + current->mm->delta_mmap = 0UL;
40108 + current->mm->delta_stack = 0UL;
40109 +#endif
40110 +
40111 + current->mm->def_flags = 0;
40112 +
40113 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40114 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40115 + send_sig(SIGKILL, current, 0);
40116 + goto out_free_dentry;
40117 + }
40118 +#endif
40119 +
40120 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40121 + pax_set_initial_flags(bprm);
40122 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40123 + if (pax_set_initial_flags_func)
40124 + (pax_set_initial_flags_func)(bprm);
40125 +#endif
40126 +
40127 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40128 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40129 + current->mm->context.user_cs_limit = PAGE_SIZE;
40130 + current->mm->def_flags |= VM_PAGEEXEC;
40131 + }
40132 +#endif
40133 +
40134 +#ifdef CONFIG_PAX_SEGMEXEC
40135 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40136 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40137 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40138 + pax_task_size = SEGMEXEC_TASK_SIZE;
40139 + current->mm->def_flags |= VM_NOHUGEPAGE;
40140 + }
40141 +#endif
40142 +
40143 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40144 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40145 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40146 + put_cpu();
40147 + }
40148 +#endif
40149
40150 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40151 may depend on the personality. */
40152 SET_PERSONALITY(loc->elf_ex);
40153 +
40154 +#ifdef CONFIG_PAX_ASLR
40155 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40156 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40157 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40158 + }
40159 +#endif
40160 +
40161 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40162 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40163 + executable_stack = EXSTACK_DISABLE_X;
40164 + current->personality &= ~READ_IMPLIES_EXEC;
40165 + } else
40166 +#endif
40167 +
40168 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40169 current->personality |= READ_IMPLIES_EXEC;
40170
40171 @@ -808,6 +1249,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40172 #else
40173 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40174 #endif
40175 +
40176 +#ifdef CONFIG_PAX_RANDMMAP
40177 + /* PaX: randomize base address at the default exe base if requested */
40178 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40179 +#ifdef CONFIG_SPARC64
40180 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40181 +#else
40182 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40183 +#endif
40184 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40185 + elf_flags |= MAP_FIXED;
40186 + }
40187 +#endif
40188 +
40189 }
40190
40191 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40192 @@ -840,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40193 * allowed task size. Note that p_filesz must always be
40194 * <= p_memsz so it is only necessary to check p_memsz.
40195 */
40196 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40197 - elf_ppnt->p_memsz > TASK_SIZE ||
40198 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40199 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40200 + elf_ppnt->p_memsz > pax_task_size ||
40201 + pax_task_size - elf_ppnt->p_memsz < k) {
40202 /* set_brk can never work. Avoid overflows. */
40203 send_sig(SIGKILL, current, 0);
40204 retval = -EINVAL;
40205 @@ -870,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40206 start_data += load_bias;
40207 end_data += load_bias;
40208
40209 +#ifdef CONFIG_PAX_RANDMMAP
40210 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40211 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40212 +#endif
40213 +
40214 /* Calling set_brk effectively mmaps the pages that we need
40215 * for the bss and break sections. We must do this before
40216 * mapping in the interpreter, to make sure it doesn't wind
40217 @@ -881,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40218 goto out_free_dentry;
40219 }
40220 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40221 - send_sig(SIGSEGV, current, 0);
40222 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40223 - goto out_free_dentry;
40224 + /*
40225 + * This bss-zeroing can fail if the ELF
40226 + * file specifies odd protections. So
40227 + * we don't check the return value
40228 + */
40229 }
40230
40231 if (elf_interpreter) {
40232 @@ -1098,7 +1560,7 @@ out:
40233 * Decide what to dump of a segment, part, all or none.
40234 */
40235 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40236 - unsigned long mm_flags)
40237 + unsigned long mm_flags, long signr)
40238 {
40239 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40240
40241 @@ -1132,7 +1594,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40242 if (vma->vm_file == NULL)
40243 return 0;
40244
40245 - if (FILTER(MAPPED_PRIVATE))
40246 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40247 goto whole;
40248
40249 /*
40250 @@ -1354,9 +1816,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40251 {
40252 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40253 int i = 0;
40254 - do
40255 + do {
40256 i += 2;
40257 - while (auxv[i - 2] != AT_NULL);
40258 + } while (auxv[i - 2] != AT_NULL);
40259 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40260 }
40261
40262 @@ -1862,14 +2324,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40263 }
40264
40265 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40266 - unsigned long mm_flags)
40267 + struct coredump_params *cprm)
40268 {
40269 struct vm_area_struct *vma;
40270 size_t size = 0;
40271
40272 for (vma = first_vma(current, gate_vma); vma != NULL;
40273 vma = next_vma(vma, gate_vma))
40274 - size += vma_dump_size(vma, mm_flags);
40275 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40276 return size;
40277 }
40278
40279 @@ -1963,7 +2425,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40280
40281 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40282
40283 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40284 + offset += elf_core_vma_data_size(gate_vma, cprm);
40285 offset += elf_core_extra_data_size();
40286 e_shoff = offset;
40287
40288 @@ -1977,10 +2439,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40289 offset = dataoff;
40290
40291 size += sizeof(*elf);
40292 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40293 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40294 goto end_coredump;
40295
40296 size += sizeof(*phdr4note);
40297 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40298 if (size > cprm->limit
40299 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40300 goto end_coredump;
40301 @@ -1994,7 +2458,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40302 phdr.p_offset = offset;
40303 phdr.p_vaddr = vma->vm_start;
40304 phdr.p_paddr = 0;
40305 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40306 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40307 phdr.p_memsz = vma->vm_end - vma->vm_start;
40308 offset += phdr.p_filesz;
40309 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40310 @@ -2005,6 +2469,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40311 phdr.p_align = ELF_EXEC_PAGESIZE;
40312
40313 size += sizeof(phdr);
40314 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40315 if (size > cprm->limit
40316 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40317 goto end_coredump;
40318 @@ -2029,7 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40319 unsigned long addr;
40320 unsigned long end;
40321
40322 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40323 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40324
40325 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40326 struct page *page;
40327 @@ -2038,6 +2503,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40328 page = get_dump_page(addr);
40329 if (page) {
40330 void *kaddr = kmap(page);
40331 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40332 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40333 !dump_write(cprm->file, kaddr,
40334 PAGE_SIZE);
40335 @@ -2055,6 +2521,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40336
40337 if (e_phnum == PN_XNUM) {
40338 size += sizeof(*shdr4extnum);
40339 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40340 if (size > cprm->limit
40341 || !dump_write(cprm->file, shdr4extnum,
40342 sizeof(*shdr4extnum)))
40343 @@ -2075,6 +2542,97 @@ out:
40344
40345 #endif /* CONFIG_ELF_CORE */
40346
40347 +#ifdef CONFIG_PAX_MPROTECT
40348 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40349 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40350 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40351 + *
40352 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40353 + * basis because we want to allow the common case and not the special ones.
40354 + */
40355 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40356 +{
40357 + struct elfhdr elf_h;
40358 + struct elf_phdr elf_p;
40359 + unsigned long i;
40360 + unsigned long oldflags;
40361 + bool is_textrel_rw, is_textrel_rx, is_relro;
40362 +
40363 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40364 + return;
40365 +
40366 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40367 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40368 +
40369 +#ifdef CONFIG_PAX_ELFRELOCS
40370 + /* possible TEXTREL */
40371 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40372 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40373 +#else
40374 + is_textrel_rw = false;
40375 + is_textrel_rx = false;
40376 +#endif
40377 +
40378 + /* possible RELRO */
40379 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40380 +
40381 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40382 + return;
40383 +
40384 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40385 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40386 +
40387 +#ifdef CONFIG_PAX_ETEXECRELOCS
40388 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40389 +#else
40390 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40391 +#endif
40392 +
40393 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40394 + !elf_check_arch(&elf_h) ||
40395 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40396 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40397 + return;
40398 +
40399 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40400 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40401 + return;
40402 + switch (elf_p.p_type) {
40403 + case PT_DYNAMIC:
40404 + if (!is_textrel_rw && !is_textrel_rx)
40405 + continue;
40406 + i = 0UL;
40407 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40408 + elf_dyn dyn;
40409 +
40410 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40411 + return;
40412 + if (dyn.d_tag == DT_NULL)
40413 + return;
40414 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40415 + gr_log_textrel(vma);
40416 + if (is_textrel_rw)
40417 + vma->vm_flags |= VM_MAYWRITE;
40418 + else
40419 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40420 + vma->vm_flags &= ~VM_MAYWRITE;
40421 + return;
40422 + }
40423 + i++;
40424 + }
40425 + return;
40426 +
40427 + case PT_GNU_RELRO:
40428 + if (!is_relro)
40429 + continue;
40430 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40431 + vma->vm_flags &= ~VM_MAYWRITE;
40432 + return;
40433 + }
40434 + }
40435 +}
40436 +#endif
40437 +
40438 static int __init init_elf_binfmt(void)
40439 {
40440 return register_binfmt(&elf_format);
40441 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40442 index 1bffbe0..c8c283e 100644
40443 --- a/fs/binfmt_flat.c
40444 +++ b/fs/binfmt_flat.c
40445 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40446 realdatastart = (unsigned long) -ENOMEM;
40447 printk("Unable to allocate RAM for process data, errno %d\n",
40448 (int)-realdatastart);
40449 + down_write(&current->mm->mmap_sem);
40450 do_munmap(current->mm, textpos, text_len);
40451 + up_write(&current->mm->mmap_sem);
40452 ret = realdatastart;
40453 goto err;
40454 }
40455 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40456 }
40457 if (IS_ERR_VALUE(result)) {
40458 printk("Unable to read data+bss, errno %d\n", (int)-result);
40459 + down_write(&current->mm->mmap_sem);
40460 do_munmap(current->mm, textpos, text_len);
40461 do_munmap(current->mm, realdatastart, len);
40462 + up_write(&current->mm->mmap_sem);
40463 ret = result;
40464 goto err;
40465 }
40466 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40467 }
40468 if (IS_ERR_VALUE(result)) {
40469 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40470 + down_write(&current->mm->mmap_sem);
40471 do_munmap(current->mm, textpos, text_len + data_len + extra +
40472 MAX_SHARED_LIBS * sizeof(unsigned long));
40473 + up_write(&current->mm->mmap_sem);
40474 ret = result;
40475 goto err;
40476 }
40477 diff --git a/fs/bio.c b/fs/bio.c
40478 index b1fe82c..84da0a9 100644
40479 --- a/fs/bio.c
40480 +++ b/fs/bio.c
40481 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40482 const int read = bio_data_dir(bio) == READ;
40483 struct bio_map_data *bmd = bio->bi_private;
40484 int i;
40485 - char *p = bmd->sgvecs[0].iov_base;
40486 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40487
40488 __bio_for_each_segment(bvec, bio, i, 0) {
40489 char *addr = page_address(bvec->bv_page);
40490 diff --git a/fs/block_dev.c b/fs/block_dev.c
40491 index b07f1da..9efcb92 100644
40492 --- a/fs/block_dev.c
40493 +++ b/fs/block_dev.c
40494 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40495 else if (bdev->bd_contains == bdev)
40496 return true; /* is a whole device which isn't held */
40497
40498 - else if (whole->bd_holder == bd_may_claim)
40499 + else if (whole->bd_holder == (void *)bd_may_claim)
40500 return true; /* is a partition of a device that is being partitioned */
40501 else if (whole->bd_holder != NULL)
40502 return false; /* is a partition of a held device */
40503 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40504 index dede441..f2a2507 100644
40505 --- a/fs/btrfs/ctree.c
40506 +++ b/fs/btrfs/ctree.c
40507 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40508 free_extent_buffer(buf);
40509 add_root_to_dirty_list(root);
40510 } else {
40511 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40512 - parent_start = parent->start;
40513 - else
40514 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40515 + if (parent)
40516 + parent_start = parent->start;
40517 + else
40518 + parent_start = 0;
40519 + } else
40520 parent_start = 0;
40521
40522 WARN_ON(trans->transid != btrfs_header_generation(parent));
40523 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40524 index fd1a06d..6e9033d 100644
40525 --- a/fs/btrfs/inode.c
40526 +++ b/fs/btrfs/inode.c
40527 @@ -6895,7 +6895,7 @@ fail:
40528 return -ENOMEM;
40529 }
40530
40531 -static int btrfs_getattr(struct vfsmount *mnt,
40532 +int btrfs_getattr(struct vfsmount *mnt,
40533 struct dentry *dentry, struct kstat *stat)
40534 {
40535 struct inode *inode = dentry->d_inode;
40536 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40537 return 0;
40538 }
40539
40540 +EXPORT_SYMBOL(btrfs_getattr);
40541 +
40542 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40543 +{
40544 + return BTRFS_I(inode)->root->anon_dev;
40545 +}
40546 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40547 +
40548 /*
40549 * If a file is moved, it will inherit the cow and compression flags of the new
40550 * directory.
40551 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40552 index c04f02c..f5c9e2e 100644
40553 --- a/fs/btrfs/ioctl.c
40554 +++ b/fs/btrfs/ioctl.c
40555 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40556 for (i = 0; i < num_types; i++) {
40557 struct btrfs_space_info *tmp;
40558
40559 + /* Don't copy in more than we allocated */
40560 if (!slot_count)
40561 break;
40562
40563 + slot_count--;
40564 +
40565 info = NULL;
40566 rcu_read_lock();
40567 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40568 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40569 memcpy(dest, &space, sizeof(space));
40570 dest++;
40571 space_args.total_spaces++;
40572 - slot_count--;
40573 }
40574 - if (!slot_count)
40575 - break;
40576 }
40577 up_read(&info->groups_sem);
40578 }
40579
40580 - user_dest = (struct btrfs_ioctl_space_info *)
40581 + user_dest = (struct btrfs_ioctl_space_info __user *)
40582 (arg + sizeof(struct btrfs_ioctl_space_args));
40583
40584 if (copy_to_user(user_dest, dest_orig, alloc_size))
40585 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40586 index cfb5543..1ae7347 100644
40587 --- a/fs/btrfs/relocation.c
40588 +++ b/fs/btrfs/relocation.c
40589 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40590 }
40591 spin_unlock(&rc->reloc_root_tree.lock);
40592
40593 - BUG_ON((struct btrfs_root *)node->data != root);
40594 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40595
40596 if (!del) {
40597 spin_lock(&rc->reloc_root_tree.lock);
40598 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40599 index 622f469..e8d2d55 100644
40600 --- a/fs/cachefiles/bind.c
40601 +++ b/fs/cachefiles/bind.c
40602 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40603 args);
40604
40605 /* start by checking things over */
40606 - ASSERT(cache->fstop_percent >= 0 &&
40607 - cache->fstop_percent < cache->fcull_percent &&
40608 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40609 cache->fcull_percent < cache->frun_percent &&
40610 cache->frun_percent < 100);
40611
40612 - ASSERT(cache->bstop_percent >= 0 &&
40613 - cache->bstop_percent < cache->bcull_percent &&
40614 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40615 cache->bcull_percent < cache->brun_percent &&
40616 cache->brun_percent < 100);
40617
40618 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40619 index 0a1467b..6a53245 100644
40620 --- a/fs/cachefiles/daemon.c
40621 +++ b/fs/cachefiles/daemon.c
40622 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40623 if (n > buflen)
40624 return -EMSGSIZE;
40625
40626 - if (copy_to_user(_buffer, buffer, n) != 0)
40627 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40628 return -EFAULT;
40629
40630 return n;
40631 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40632 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40633 return -EIO;
40634
40635 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40636 + if (datalen > PAGE_SIZE - 1)
40637 return -EOPNOTSUPP;
40638
40639 /* drag the command string into the kernel so we can parse it */
40640 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40641 if (args[0] != '%' || args[1] != '\0')
40642 return -EINVAL;
40643
40644 - if (fstop < 0 || fstop >= cache->fcull_percent)
40645 + if (fstop >= cache->fcull_percent)
40646 return cachefiles_daemon_range_error(cache, args);
40647
40648 cache->fstop_percent = fstop;
40649 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40650 if (args[0] != '%' || args[1] != '\0')
40651 return -EINVAL;
40652
40653 - if (bstop < 0 || bstop >= cache->bcull_percent)
40654 + if (bstop >= cache->bcull_percent)
40655 return cachefiles_daemon_range_error(cache, args);
40656
40657 cache->bstop_percent = bstop;
40658 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40659 index bd6bc1b..b627b53 100644
40660 --- a/fs/cachefiles/internal.h
40661 +++ b/fs/cachefiles/internal.h
40662 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40663 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40664 struct rb_root active_nodes; /* active nodes (can't be culled) */
40665 rwlock_t active_lock; /* lock for active_nodes */
40666 - atomic_t gravecounter; /* graveyard uniquifier */
40667 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40668 unsigned frun_percent; /* when to stop culling (% files) */
40669 unsigned fcull_percent; /* when to start culling (% files) */
40670 unsigned fstop_percent; /* when to stop allocating (% files) */
40671 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40672 * proc.c
40673 */
40674 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40675 -extern atomic_t cachefiles_lookup_histogram[HZ];
40676 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40677 -extern atomic_t cachefiles_create_histogram[HZ];
40678 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40679 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40680 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40681
40682 extern int __init cachefiles_proc_init(void);
40683 extern void cachefiles_proc_cleanup(void);
40684 static inline
40685 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40686 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40687 {
40688 unsigned long jif = jiffies - start_jif;
40689 if (jif >= HZ)
40690 jif = HZ - 1;
40691 - atomic_inc(&histogram[jif]);
40692 + atomic_inc_unchecked(&histogram[jif]);
40693 }
40694
40695 #else
40696 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40697 index a0358c2..d6137f2 100644
40698 --- a/fs/cachefiles/namei.c
40699 +++ b/fs/cachefiles/namei.c
40700 @@ -318,7 +318,7 @@ try_again:
40701 /* first step is to make up a grave dentry in the graveyard */
40702 sprintf(nbuffer, "%08x%08x",
40703 (uint32_t) get_seconds(),
40704 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40705 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40706
40707 /* do the multiway lock magic */
40708 trap = lock_rename(cache->graveyard, dir);
40709 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40710 index eccd339..4c1d995 100644
40711 --- a/fs/cachefiles/proc.c
40712 +++ b/fs/cachefiles/proc.c
40713 @@ -14,9 +14,9 @@
40714 #include <linux/seq_file.h>
40715 #include "internal.h"
40716
40717 -atomic_t cachefiles_lookup_histogram[HZ];
40718 -atomic_t cachefiles_mkdir_histogram[HZ];
40719 -atomic_t cachefiles_create_histogram[HZ];
40720 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40721 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40722 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40723
40724 /*
40725 * display the latency histogram
40726 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40727 return 0;
40728 default:
40729 index = (unsigned long) v - 3;
40730 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40731 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40732 - z = atomic_read(&cachefiles_create_histogram[index]);
40733 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40734 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40735 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40736 if (x == 0 && y == 0 && z == 0)
40737 return 0;
40738
40739 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40740 index 0e3c092..818480e 100644
40741 --- a/fs/cachefiles/rdwr.c
40742 +++ b/fs/cachefiles/rdwr.c
40743 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40744 old_fs = get_fs();
40745 set_fs(KERNEL_DS);
40746 ret = file->f_op->write(
40747 - file, (const void __user *) data, len, &pos);
40748 + file, (const void __force_user *) data, len, &pos);
40749 set_fs(old_fs);
40750 kunmap(page);
40751 if (ret != len)
40752 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40753 index 9895400..fa40a7d 100644
40754 --- a/fs/ceph/dir.c
40755 +++ b/fs/ceph/dir.c
40756 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40757 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40758 struct ceph_mds_client *mdsc = fsc->mdsc;
40759 unsigned frag = fpos_frag(filp->f_pos);
40760 - int off = fpos_off(filp->f_pos);
40761 + unsigned int off = fpos_off(filp->f_pos);
40762 int err;
40763 u32 ftype;
40764 struct ceph_mds_reply_info_parsed *rinfo;
40765 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40766 index 84e8c07..6170d31 100644
40767 --- a/fs/cifs/cifs_debug.c
40768 +++ b/fs/cifs/cifs_debug.c
40769 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40770
40771 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40772 #ifdef CONFIG_CIFS_STATS2
40773 - atomic_set(&totBufAllocCount, 0);
40774 - atomic_set(&totSmBufAllocCount, 0);
40775 + atomic_set_unchecked(&totBufAllocCount, 0);
40776 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40777 #endif /* CONFIG_CIFS_STATS2 */
40778 spin_lock(&cifs_tcp_ses_lock);
40779 list_for_each(tmp1, &cifs_tcp_ses_list) {
40780 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40781 tcon = list_entry(tmp3,
40782 struct cifs_tcon,
40783 tcon_list);
40784 - atomic_set(&tcon->num_smbs_sent, 0);
40785 - atomic_set(&tcon->num_writes, 0);
40786 - atomic_set(&tcon->num_reads, 0);
40787 - atomic_set(&tcon->num_oplock_brks, 0);
40788 - atomic_set(&tcon->num_opens, 0);
40789 - atomic_set(&tcon->num_posixopens, 0);
40790 - atomic_set(&tcon->num_posixmkdirs, 0);
40791 - atomic_set(&tcon->num_closes, 0);
40792 - atomic_set(&tcon->num_deletes, 0);
40793 - atomic_set(&tcon->num_mkdirs, 0);
40794 - atomic_set(&tcon->num_rmdirs, 0);
40795 - atomic_set(&tcon->num_renames, 0);
40796 - atomic_set(&tcon->num_t2renames, 0);
40797 - atomic_set(&tcon->num_ffirst, 0);
40798 - atomic_set(&tcon->num_fnext, 0);
40799 - atomic_set(&tcon->num_fclose, 0);
40800 - atomic_set(&tcon->num_hardlinks, 0);
40801 - atomic_set(&tcon->num_symlinks, 0);
40802 - atomic_set(&tcon->num_locks, 0);
40803 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40804 + atomic_set_unchecked(&tcon->num_writes, 0);
40805 + atomic_set_unchecked(&tcon->num_reads, 0);
40806 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40807 + atomic_set_unchecked(&tcon->num_opens, 0);
40808 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40809 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40810 + atomic_set_unchecked(&tcon->num_closes, 0);
40811 + atomic_set_unchecked(&tcon->num_deletes, 0);
40812 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40813 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40814 + atomic_set_unchecked(&tcon->num_renames, 0);
40815 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40816 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40817 + atomic_set_unchecked(&tcon->num_fnext, 0);
40818 + atomic_set_unchecked(&tcon->num_fclose, 0);
40819 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40820 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40821 + atomic_set_unchecked(&tcon->num_locks, 0);
40822 }
40823 }
40824 }
40825 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40826 smBufAllocCount.counter, cifs_min_small);
40827 #ifdef CONFIG_CIFS_STATS2
40828 seq_printf(m, "Total Large %d Small %d Allocations\n",
40829 - atomic_read(&totBufAllocCount),
40830 - atomic_read(&totSmBufAllocCount));
40831 + atomic_read_unchecked(&totBufAllocCount),
40832 + atomic_read_unchecked(&totSmBufAllocCount));
40833 #endif /* CONFIG_CIFS_STATS2 */
40834
40835 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40836 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40837 if (tcon->need_reconnect)
40838 seq_puts(m, "\tDISCONNECTED ");
40839 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40840 - atomic_read(&tcon->num_smbs_sent),
40841 - atomic_read(&tcon->num_oplock_brks));
40842 + atomic_read_unchecked(&tcon->num_smbs_sent),
40843 + atomic_read_unchecked(&tcon->num_oplock_brks));
40844 seq_printf(m, "\nReads: %d Bytes: %lld",
40845 - atomic_read(&tcon->num_reads),
40846 + atomic_read_unchecked(&tcon->num_reads),
40847 (long long)(tcon->bytes_read));
40848 seq_printf(m, "\nWrites: %d Bytes: %lld",
40849 - atomic_read(&tcon->num_writes),
40850 + atomic_read_unchecked(&tcon->num_writes),
40851 (long long)(tcon->bytes_written));
40852 seq_printf(m, "\nFlushes: %d",
40853 - atomic_read(&tcon->num_flushes));
40854 + atomic_read_unchecked(&tcon->num_flushes));
40855 seq_printf(m, "\nLocks: %d HardLinks: %d "
40856 "Symlinks: %d",
40857 - atomic_read(&tcon->num_locks),
40858 - atomic_read(&tcon->num_hardlinks),
40859 - atomic_read(&tcon->num_symlinks));
40860 + atomic_read_unchecked(&tcon->num_locks),
40861 + atomic_read_unchecked(&tcon->num_hardlinks),
40862 + atomic_read_unchecked(&tcon->num_symlinks));
40863 seq_printf(m, "\nOpens: %d Closes: %d "
40864 "Deletes: %d",
40865 - atomic_read(&tcon->num_opens),
40866 - atomic_read(&tcon->num_closes),
40867 - atomic_read(&tcon->num_deletes));
40868 + atomic_read_unchecked(&tcon->num_opens),
40869 + atomic_read_unchecked(&tcon->num_closes),
40870 + atomic_read_unchecked(&tcon->num_deletes));
40871 seq_printf(m, "\nPosix Opens: %d "
40872 "Posix Mkdirs: %d",
40873 - atomic_read(&tcon->num_posixopens),
40874 - atomic_read(&tcon->num_posixmkdirs));
40875 + atomic_read_unchecked(&tcon->num_posixopens),
40876 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40877 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40878 - atomic_read(&tcon->num_mkdirs),
40879 - atomic_read(&tcon->num_rmdirs));
40880 + atomic_read_unchecked(&tcon->num_mkdirs),
40881 + atomic_read_unchecked(&tcon->num_rmdirs));
40882 seq_printf(m, "\nRenames: %d T2 Renames %d",
40883 - atomic_read(&tcon->num_renames),
40884 - atomic_read(&tcon->num_t2renames));
40885 + atomic_read_unchecked(&tcon->num_renames),
40886 + atomic_read_unchecked(&tcon->num_t2renames));
40887 seq_printf(m, "\nFindFirst: %d FNext %d "
40888 "FClose %d",
40889 - atomic_read(&tcon->num_ffirst),
40890 - atomic_read(&tcon->num_fnext),
40891 - atomic_read(&tcon->num_fclose));
40892 + atomic_read_unchecked(&tcon->num_ffirst),
40893 + atomic_read_unchecked(&tcon->num_fnext),
40894 + atomic_read_unchecked(&tcon->num_fclose));
40895 }
40896 }
40897 }
40898 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40899 index 8f1fe32..38f9e27 100644
40900 --- a/fs/cifs/cifsfs.c
40901 +++ b/fs/cifs/cifsfs.c
40902 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40903 cifs_req_cachep = kmem_cache_create("cifs_request",
40904 CIFSMaxBufSize +
40905 MAX_CIFS_HDR_SIZE, 0,
40906 - SLAB_HWCACHE_ALIGN, NULL);
40907 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40908 if (cifs_req_cachep == NULL)
40909 return -ENOMEM;
40910
40911 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
40912 efficient to alloc 1 per page off the slab compared to 17K (5page)
40913 alloc of large cifs buffers even when page debugging is on */
40914 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40915 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40916 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40917 NULL);
40918 if (cifs_sm_req_cachep == NULL) {
40919 mempool_destroy(cifs_req_poolp);
40920 @@ -1101,8 +1101,8 @@ init_cifs(void)
40921 atomic_set(&bufAllocCount, 0);
40922 atomic_set(&smBufAllocCount, 0);
40923 #ifdef CONFIG_CIFS_STATS2
40924 - atomic_set(&totBufAllocCount, 0);
40925 - atomic_set(&totSmBufAllocCount, 0);
40926 + atomic_set_unchecked(&totBufAllocCount, 0);
40927 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40928 #endif /* CONFIG_CIFS_STATS2 */
40929
40930 atomic_set(&midCount, 0);
40931 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
40932 index 8238aa1..0347196 100644
40933 --- a/fs/cifs/cifsglob.h
40934 +++ b/fs/cifs/cifsglob.h
40935 @@ -392,28 +392,28 @@ struct cifs_tcon {
40936 __u16 Flags; /* optional support bits */
40937 enum statusEnum tidStatus;
40938 #ifdef CONFIG_CIFS_STATS
40939 - atomic_t num_smbs_sent;
40940 - atomic_t num_writes;
40941 - atomic_t num_reads;
40942 - atomic_t num_flushes;
40943 - atomic_t num_oplock_brks;
40944 - atomic_t num_opens;
40945 - atomic_t num_closes;
40946 - atomic_t num_deletes;
40947 - atomic_t num_mkdirs;
40948 - atomic_t num_posixopens;
40949 - atomic_t num_posixmkdirs;
40950 - atomic_t num_rmdirs;
40951 - atomic_t num_renames;
40952 - atomic_t num_t2renames;
40953 - atomic_t num_ffirst;
40954 - atomic_t num_fnext;
40955 - atomic_t num_fclose;
40956 - atomic_t num_hardlinks;
40957 - atomic_t num_symlinks;
40958 - atomic_t num_locks;
40959 - atomic_t num_acl_get;
40960 - atomic_t num_acl_set;
40961 + atomic_unchecked_t num_smbs_sent;
40962 + atomic_unchecked_t num_writes;
40963 + atomic_unchecked_t num_reads;
40964 + atomic_unchecked_t num_flushes;
40965 + atomic_unchecked_t num_oplock_brks;
40966 + atomic_unchecked_t num_opens;
40967 + atomic_unchecked_t num_closes;
40968 + atomic_unchecked_t num_deletes;
40969 + atomic_unchecked_t num_mkdirs;
40970 + atomic_unchecked_t num_posixopens;
40971 + atomic_unchecked_t num_posixmkdirs;
40972 + atomic_unchecked_t num_rmdirs;
40973 + atomic_unchecked_t num_renames;
40974 + atomic_unchecked_t num_t2renames;
40975 + atomic_unchecked_t num_ffirst;
40976 + atomic_unchecked_t num_fnext;
40977 + atomic_unchecked_t num_fclose;
40978 + atomic_unchecked_t num_hardlinks;
40979 + atomic_unchecked_t num_symlinks;
40980 + atomic_unchecked_t num_locks;
40981 + atomic_unchecked_t num_acl_get;
40982 + atomic_unchecked_t num_acl_set;
40983 #ifdef CONFIG_CIFS_STATS2
40984 unsigned long long time_writes;
40985 unsigned long long time_reads;
40986 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
40987 }
40988
40989 #ifdef CONFIG_CIFS_STATS
40990 -#define cifs_stats_inc atomic_inc
40991 +#define cifs_stats_inc atomic_inc_unchecked
40992
40993 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40994 unsigned int bytes)
40995 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
40996 /* Various Debug counters */
40997 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40998 #ifdef CONFIG_CIFS_STATS2
40999 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41000 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41001 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41002 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41003 #endif
41004 GLOBAL_EXTERN atomic_t smBufAllocCount;
41005 GLOBAL_EXTERN atomic_t midCount;
41006 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41007 index 6b0e064..94e6c3c 100644
41008 --- a/fs/cifs/link.c
41009 +++ b/fs/cifs/link.c
41010 @@ -600,7 +600,7 @@ symlink_exit:
41011
41012 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41013 {
41014 - char *p = nd_get_link(nd);
41015 + const char *p = nd_get_link(nd);
41016 if (!IS_ERR(p))
41017 kfree(p);
41018 }
41019 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41020 index 703ef5c..2a44ed5 100644
41021 --- a/fs/cifs/misc.c
41022 +++ b/fs/cifs/misc.c
41023 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41024 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41025 atomic_inc(&bufAllocCount);
41026 #ifdef CONFIG_CIFS_STATS2
41027 - atomic_inc(&totBufAllocCount);
41028 + atomic_inc_unchecked(&totBufAllocCount);
41029 #endif /* CONFIG_CIFS_STATS2 */
41030 }
41031
41032 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41033 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41034 atomic_inc(&smBufAllocCount);
41035 #ifdef CONFIG_CIFS_STATS2
41036 - atomic_inc(&totSmBufAllocCount);
41037 + atomic_inc_unchecked(&totSmBufAllocCount);
41038 #endif /* CONFIG_CIFS_STATS2 */
41039
41040 }
41041 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41042 index 6901578..d402eb5 100644
41043 --- a/fs/coda/cache.c
41044 +++ b/fs/coda/cache.c
41045 @@ -24,7 +24,7 @@
41046 #include "coda_linux.h"
41047 #include "coda_cache.h"
41048
41049 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41050 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41051
41052 /* replace or extend an acl cache hit */
41053 void coda_cache_enter(struct inode *inode, int mask)
41054 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41055 struct coda_inode_info *cii = ITOC(inode);
41056
41057 spin_lock(&cii->c_lock);
41058 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41059 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41060 if (cii->c_uid != current_fsuid()) {
41061 cii->c_uid = current_fsuid();
41062 cii->c_cached_perm = mask;
41063 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41064 {
41065 struct coda_inode_info *cii = ITOC(inode);
41066 spin_lock(&cii->c_lock);
41067 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41068 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41069 spin_unlock(&cii->c_lock);
41070 }
41071
41072 /* remove all acl caches */
41073 void coda_cache_clear_all(struct super_block *sb)
41074 {
41075 - atomic_inc(&permission_epoch);
41076 + atomic_inc_unchecked(&permission_epoch);
41077 }
41078
41079
41080 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41081 spin_lock(&cii->c_lock);
41082 hit = (mask & cii->c_cached_perm) == mask &&
41083 cii->c_uid == current_fsuid() &&
41084 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41085 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41086 spin_unlock(&cii->c_lock);
41087
41088 return hit;
41089 diff --git a/fs/compat.c b/fs/compat.c
41090 index c987875..08771ca 100644
41091 --- a/fs/compat.c
41092 +++ b/fs/compat.c
41093 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41094 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41095 {
41096 compat_ino_t ino = stat->ino;
41097 - typeof(ubuf->st_uid) uid = 0;
41098 - typeof(ubuf->st_gid) gid = 0;
41099 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41100 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41101 int err;
41102
41103 SET_UID(uid, stat->uid);
41104 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41105
41106 set_fs(KERNEL_DS);
41107 /* The __user pointer cast is valid because of the set_fs() */
41108 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41109 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41110 set_fs(oldfs);
41111 /* truncating is ok because it's a user address */
41112 if (!ret)
41113 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41114 goto out;
41115
41116 ret = -EINVAL;
41117 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41118 + if (nr_segs > UIO_MAXIOV)
41119 goto out;
41120 if (nr_segs > fast_segs) {
41121 ret = -ENOMEM;
41122 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41123
41124 struct compat_readdir_callback {
41125 struct compat_old_linux_dirent __user *dirent;
41126 + struct file * file;
41127 int result;
41128 };
41129
41130 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41131 buf->result = -EOVERFLOW;
41132 return -EOVERFLOW;
41133 }
41134 +
41135 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41136 + return 0;
41137 +
41138 buf->result++;
41139 dirent = buf->dirent;
41140 if (!access_ok(VERIFY_WRITE, dirent,
41141 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41142
41143 buf.result = 0;
41144 buf.dirent = dirent;
41145 + buf.file = file;
41146
41147 error = vfs_readdir(file, compat_fillonedir, &buf);
41148 if (buf.result)
41149 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41150 struct compat_getdents_callback {
41151 struct compat_linux_dirent __user *current_dir;
41152 struct compat_linux_dirent __user *previous;
41153 + struct file * file;
41154 int count;
41155 int error;
41156 };
41157 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41158 buf->error = -EOVERFLOW;
41159 return -EOVERFLOW;
41160 }
41161 +
41162 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41163 + return 0;
41164 +
41165 dirent = buf->previous;
41166 if (dirent) {
41167 if (__put_user(offset, &dirent->d_off))
41168 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41169 buf.previous = NULL;
41170 buf.count = count;
41171 buf.error = 0;
41172 + buf.file = file;
41173
41174 error = vfs_readdir(file, compat_filldir, &buf);
41175 if (error >= 0)
41176 @@ -1003,6 +1015,7 @@ out:
41177 struct compat_getdents_callback64 {
41178 struct linux_dirent64 __user *current_dir;
41179 struct linux_dirent64 __user *previous;
41180 + struct file * file;
41181 int count;
41182 int error;
41183 };
41184 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41185 buf->error = -EINVAL; /* only used if we fail.. */
41186 if (reclen > buf->count)
41187 return -EINVAL;
41188 +
41189 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41190 + return 0;
41191 +
41192 dirent = buf->previous;
41193
41194 if (dirent) {
41195 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41196 buf.previous = NULL;
41197 buf.count = count;
41198 buf.error = 0;
41199 + buf.file = file;
41200
41201 error = vfs_readdir(file, compat_filldir64, &buf);
41202 if (error >= 0)
41203 error = buf.error;
41204 lastdirent = buf.previous;
41205 if (lastdirent) {
41206 - typeof(lastdirent->d_off) d_off = file->f_pos;
41207 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41208 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41209 error = -EFAULT;
41210 else
41211 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41212 index 112e45a..b59845b 100644
41213 --- a/fs/compat_binfmt_elf.c
41214 +++ b/fs/compat_binfmt_elf.c
41215 @@ -30,11 +30,13 @@
41216 #undef elf_phdr
41217 #undef elf_shdr
41218 #undef elf_note
41219 +#undef elf_dyn
41220 #undef elf_addr_t
41221 #define elfhdr elf32_hdr
41222 #define elf_phdr elf32_phdr
41223 #define elf_shdr elf32_shdr
41224 #define elf_note elf32_note
41225 +#define elf_dyn Elf32_Dyn
41226 #define elf_addr_t Elf32_Addr
41227
41228 /*
41229 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41230 index 51352de..93292ff 100644
41231 --- a/fs/compat_ioctl.c
41232 +++ b/fs/compat_ioctl.c
41233 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41234
41235 err = get_user(palp, &up->palette);
41236 err |= get_user(length, &up->length);
41237 + if (err)
41238 + return -EFAULT;
41239
41240 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41241 err = put_user(compat_ptr(palp), &up_native->palette);
41242 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41243 return -EFAULT;
41244 if (__get_user(udata, &ss32->iomem_base))
41245 return -EFAULT;
41246 - ss.iomem_base = compat_ptr(udata);
41247 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41248 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41249 __get_user(ss.port_high, &ss32->port_high))
41250 return -EFAULT;
41251 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41252 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41253 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41254 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41255 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41256 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41257 return -EFAULT;
41258
41259 return ioctl_preallocate(file, p);
41260 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41261 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41262 {
41263 unsigned int a, b;
41264 - a = *(unsigned int *)p;
41265 - b = *(unsigned int *)q;
41266 + a = *(const unsigned int *)p;
41267 + b = *(const unsigned int *)q;
41268 if (a > b)
41269 return 1;
41270 if (a < b)
41271 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41272 index 9a37a9b..35792b6 100644
41273 --- a/fs/configfs/dir.c
41274 +++ b/fs/configfs/dir.c
41275 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41276 }
41277 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41278 struct configfs_dirent *next;
41279 - const char * name;
41280 + const unsigned char * name;
41281 + char d_name[sizeof(next->s_dentry->d_iname)];
41282 int len;
41283 struct inode *inode = NULL;
41284
41285 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41286 continue;
41287
41288 name = configfs_get_name(next);
41289 - len = strlen(name);
41290 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41291 + len = next->s_dentry->d_name.len;
41292 + memcpy(d_name, name, len);
41293 + name = d_name;
41294 + } else
41295 + len = strlen(name);
41296
41297 /*
41298 * We'll have a dentry and an inode for
41299 diff --git a/fs/dcache.c b/fs/dcache.c
41300 index f7908ae..920a680 100644
41301 --- a/fs/dcache.c
41302 +++ b/fs/dcache.c
41303 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41304 mempages -= reserve;
41305
41306 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41307 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41308 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41309
41310 dcache_init();
41311 inode_init();
41312 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41313 index f3a257d..715ac0f 100644
41314 --- a/fs/debugfs/inode.c
41315 +++ b/fs/debugfs/inode.c
41316 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41317 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41318 {
41319 return debugfs_create_file(name,
41320 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41321 + S_IFDIR | S_IRWXU,
41322 +#else
41323 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41324 +#endif
41325 parent, NULL, NULL);
41326 }
41327 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41328 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41329 index d2039ca..a766407 100644
41330 --- a/fs/ecryptfs/inode.c
41331 +++ b/fs/ecryptfs/inode.c
41332 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41333 old_fs = get_fs();
41334 set_fs(get_ds());
41335 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41336 - (char __user *)lower_buf,
41337 + (char __force_user *)lower_buf,
41338 lower_bufsiz);
41339 set_fs(old_fs);
41340 if (rc < 0)
41341 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41342 }
41343 old_fs = get_fs();
41344 set_fs(get_ds());
41345 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41346 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41347 set_fs(old_fs);
41348 if (rc < 0) {
41349 kfree(buf);
41350 @@ -752,7 +752,7 @@ out:
41351 static void
41352 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41353 {
41354 - char *buf = nd_get_link(nd);
41355 + const char *buf = nd_get_link(nd);
41356 if (!IS_ERR(buf)) {
41357 /* Free the char* */
41358 kfree(buf);
41359 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41360 index 0dc5a3d..d3cdeea 100644
41361 --- a/fs/ecryptfs/miscdev.c
41362 +++ b/fs/ecryptfs/miscdev.c
41363 @@ -328,7 +328,7 @@ check_list:
41364 goto out_unlock_msg_ctx;
41365 i = 5;
41366 if (msg_ctx->msg) {
41367 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41368 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41369 goto out_unlock_msg_ctx;
41370 i += packet_length_size;
41371 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41372 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41373 index 54eb14c..e51b453 100644
41374 --- a/fs/ecryptfs/read_write.c
41375 +++ b/fs/ecryptfs/read_write.c
41376 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41377 return -EIO;
41378 fs_save = get_fs();
41379 set_fs(get_ds());
41380 - rc = vfs_write(lower_file, data, size, &offset);
41381 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41382 set_fs(fs_save);
41383 mark_inode_dirty_sync(ecryptfs_inode);
41384 return rc;
41385 @@ -130,7 +130,12 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41386 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
41387 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
41388 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
41389 - size_t total_remaining_bytes = ((offset + size) - pos);
41390 + loff_t total_remaining_bytes = ((offset + size) - pos);
41391 +
41392 + if (fatal_signal_pending(current)) {
41393 + rc = -EINTR;
41394 + break;
41395 + }
41396
41397 if (fatal_signal_pending(current)) {
41398 rc = -EINTR;
41399 @@ -141,7 +146,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41400 num_bytes = total_remaining_bytes;
41401 if (pos < offset) {
41402 /* remaining zeros to write, up to destination offset */
41403 - size_t total_remaining_zeros = (offset - pos);
41404 + loff_t total_remaining_zeros = (offset - pos);
41405
41406 if (num_bytes > total_remaining_zeros)
41407 num_bytes = total_remaining_zeros;
41408 @@ -244,7 +249,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41409 return -EIO;
41410 fs_save = get_fs();
41411 set_fs(get_ds());
41412 - rc = vfs_read(lower_file, data, size, &offset);
41413 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41414 set_fs(fs_save);
41415 return rc;
41416 }
41417 diff --git a/fs/exec.c b/fs/exec.c
41418 index 3625464..fac01f4 100644
41419 --- a/fs/exec.c
41420 +++ b/fs/exec.c
41421 @@ -55,12 +55,28 @@
41422 #include <linux/pipe_fs_i.h>
41423 #include <linux/oom.h>
41424 #include <linux/compat.h>
41425 +#include <linux/random.h>
41426 +#include <linux/seq_file.h>
41427 +
41428 +#ifdef CONFIG_PAX_REFCOUNT
41429 +#include <linux/kallsyms.h>
41430 +#include <linux/kdebug.h>
41431 +#endif
41432
41433 #include <asm/uaccess.h>
41434 #include <asm/mmu_context.h>
41435 #include <asm/tlb.h>
41436 #include "internal.h"
41437
41438 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41439 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41440 +#endif
41441 +
41442 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41443 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41444 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41445 +#endif
41446 +
41447 int core_uses_pid;
41448 char core_pattern[CORENAME_MAX_SIZE] = "core";
41449 unsigned int core_pipe_limit;
41450 @@ -70,7 +86,7 @@ struct core_name {
41451 char *corename;
41452 int used, size;
41453 };
41454 -static atomic_t call_count = ATOMIC_INIT(1);
41455 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41456
41457 /* The maximal length of core_pattern is also specified in sysctl.c */
41458
41459 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41460 int write)
41461 {
41462 struct page *page;
41463 - int ret;
41464
41465 -#ifdef CONFIG_STACK_GROWSUP
41466 - if (write) {
41467 - ret = expand_downwards(bprm->vma, pos);
41468 - if (ret < 0)
41469 - return NULL;
41470 - }
41471 -#endif
41472 - ret = get_user_pages(current, bprm->mm, pos,
41473 - 1, write, 1, &page, NULL);
41474 - if (ret <= 0)
41475 + if (0 > expand_downwards(bprm->vma, pos))
41476 + return NULL;
41477 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41478 return NULL;
41479
41480 if (write) {
41481 @@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41482 vma->vm_end = STACK_TOP_MAX;
41483 vma->vm_start = vma->vm_end - PAGE_SIZE;
41484 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41485 +
41486 +#ifdef CONFIG_PAX_SEGMEXEC
41487 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41488 +#endif
41489 +
41490 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41491 INIT_LIST_HEAD(&vma->anon_vma_chain);
41492
41493 @@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41494 mm->stack_vm = mm->total_vm = 1;
41495 up_write(&mm->mmap_sem);
41496 bprm->p = vma->vm_end - sizeof(void *);
41497 +
41498 +#ifdef CONFIG_PAX_RANDUSTACK
41499 + if (randomize_va_space)
41500 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41501 +#endif
41502 +
41503 return 0;
41504 err:
41505 up_write(&mm->mmap_sem);
41506 @@ -396,19 +415,7 @@ err:
41507 return err;
41508 }
41509
41510 -struct user_arg_ptr {
41511 -#ifdef CONFIG_COMPAT
41512 - bool is_compat;
41513 -#endif
41514 - union {
41515 - const char __user *const __user *native;
41516 -#ifdef CONFIG_COMPAT
41517 - compat_uptr_t __user *compat;
41518 -#endif
41519 - } ptr;
41520 -};
41521 -
41522 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41523 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41524 {
41525 const char __user *native;
41526
41527 @@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41528 compat_uptr_t compat;
41529
41530 if (get_user(compat, argv.ptr.compat + nr))
41531 - return ERR_PTR(-EFAULT);
41532 + return (const char __force_user *)ERR_PTR(-EFAULT);
41533
41534 return compat_ptr(compat);
41535 }
41536 #endif
41537
41538 if (get_user(native, argv.ptr.native + nr))
41539 - return ERR_PTR(-EFAULT);
41540 + return (const char __force_user *)ERR_PTR(-EFAULT);
41541
41542 return native;
41543 }
41544 @@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
41545 if (!p)
41546 break;
41547
41548 - if (IS_ERR(p))
41549 + if (IS_ERR((const char __force_kernel *)p))
41550 return -EFAULT;
41551
41552 if (i++ >= max)
41553 @@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41554
41555 ret = -EFAULT;
41556 str = get_user_arg_ptr(argv, argc);
41557 - if (IS_ERR(str))
41558 + if (IS_ERR((const char __force_kernel *)str))
41559 goto out;
41560
41561 len = strnlen_user(str, MAX_ARG_STRLEN);
41562 @@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41563 int r;
41564 mm_segment_t oldfs = get_fs();
41565 struct user_arg_ptr argv = {
41566 - .ptr.native = (const char __user *const __user *)__argv,
41567 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41568 };
41569
41570 set_fs(KERNEL_DS);
41571 @@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41572 unsigned long new_end = old_end - shift;
41573 struct mmu_gather tlb;
41574
41575 - BUG_ON(new_start > new_end);
41576 + if (new_start >= new_end || new_start < mmap_min_addr)
41577 + return -ENOMEM;
41578
41579 /*
41580 * ensure there are no vmas between where we want to go
41581 @@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41582 if (vma != find_vma(mm, new_start))
41583 return -EFAULT;
41584
41585 +#ifdef CONFIG_PAX_SEGMEXEC
41586 + BUG_ON(pax_find_mirror_vma(vma));
41587 +#endif
41588 +
41589 /*
41590 * cover the whole range: [new_start, old_end)
41591 */
41592 @@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41593 stack_top = arch_align_stack(stack_top);
41594 stack_top = PAGE_ALIGN(stack_top);
41595
41596 - if (unlikely(stack_top < mmap_min_addr) ||
41597 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41598 - return -ENOMEM;
41599 -
41600 stack_shift = vma->vm_end - stack_top;
41601
41602 bprm->p -= stack_shift;
41603 @@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41604 bprm->exec -= stack_shift;
41605
41606 down_write(&mm->mmap_sem);
41607 +
41608 + /* Move stack pages down in memory. */
41609 + if (stack_shift) {
41610 + ret = shift_arg_pages(vma, stack_shift);
41611 + if (ret)
41612 + goto out_unlock;
41613 + }
41614 +
41615 vm_flags = VM_STACK_FLAGS;
41616
41617 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41618 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41619 + vm_flags &= ~VM_EXEC;
41620 +
41621 +#ifdef CONFIG_PAX_MPROTECT
41622 + if (mm->pax_flags & MF_PAX_MPROTECT)
41623 + vm_flags &= ~VM_MAYEXEC;
41624 +#endif
41625 +
41626 + }
41627 +#endif
41628 +
41629 /*
41630 * Adjust stack execute permissions; explicitly enable for
41631 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41632 @@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41633 goto out_unlock;
41634 BUG_ON(prev != vma);
41635
41636 - /* Move stack pages down in memory. */
41637 - if (stack_shift) {
41638 - ret = shift_arg_pages(vma, stack_shift);
41639 - if (ret)
41640 - goto out_unlock;
41641 - }
41642 -
41643 /* mprotect_fixup is overkill to remove the temporary stack flags */
41644 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41645
41646 @@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
41647 old_fs = get_fs();
41648 set_fs(get_ds());
41649 /* The cast to a user pointer is valid due to the set_fs() */
41650 - result = vfs_read(file, (void __user *)addr, count, &pos);
41651 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41652 set_fs(old_fs);
41653 return result;
41654 }
41655 @@ -1247,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41656 }
41657 rcu_read_unlock();
41658
41659 - if (p->fs->users > n_fs) {
41660 + if (atomic_read(&p->fs->users) > n_fs) {
41661 bprm->unsafe |= LSM_UNSAFE_SHARE;
41662 } else {
41663 res = -EAGAIN;
41664 @@ -1450,6 +1471,11 @@ static int do_execve_common(const char *filename,
41665 struct user_arg_ptr envp,
41666 struct pt_regs *regs)
41667 {
41668 +#ifdef CONFIG_GRKERNSEC
41669 + struct file *old_exec_file;
41670 + struct acl_subject_label *old_acl;
41671 + struct rlimit old_rlim[RLIM_NLIMITS];
41672 +#endif
41673 struct linux_binprm *bprm;
41674 struct file *file;
41675 struct files_struct *displaced;
41676 @@ -1457,6 +1483,8 @@ static int do_execve_common(const char *filename,
41677 int retval;
41678 const struct cred *cred = current_cred();
41679
41680 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41681 +
41682 /*
41683 * We move the actual failure in case of RLIMIT_NPROC excess from
41684 * set*uid() to execve() because too many poorly written programs
41685 @@ -1497,12 +1525,27 @@ static int do_execve_common(const char *filename,
41686 if (IS_ERR(file))
41687 goto out_unmark;
41688
41689 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
41690 + retval = -EPERM;
41691 + goto out_file;
41692 + }
41693 +
41694 sched_exec();
41695
41696 bprm->file = file;
41697 bprm->filename = filename;
41698 bprm->interp = filename;
41699
41700 + if (gr_process_user_ban()) {
41701 + retval = -EPERM;
41702 + goto out_file;
41703 + }
41704 +
41705 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41706 + retval = -EACCES;
41707 + goto out_file;
41708 + }
41709 +
41710 retval = bprm_mm_init(bprm);
41711 if (retval)
41712 goto out_file;
41713 @@ -1532,9 +1575,40 @@ static int do_execve_common(const char *filename,
41714 if (retval < 0)
41715 goto out;
41716
41717 + if (!gr_tpe_allow(file)) {
41718 + retval = -EACCES;
41719 + goto out;
41720 + }
41721 +
41722 + if (gr_check_crash_exec(file)) {
41723 + retval = -EACCES;
41724 + goto out;
41725 + }
41726 +
41727 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41728 +
41729 + gr_handle_exec_args(bprm, argv);
41730 +
41731 +#ifdef CONFIG_GRKERNSEC
41732 + old_acl = current->acl;
41733 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41734 + old_exec_file = current->exec_file;
41735 + get_file(file);
41736 + current->exec_file = file;
41737 +#endif
41738 +
41739 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41740 + bprm->unsafe);
41741 + if (retval < 0)
41742 + goto out_fail;
41743 +
41744 retval = search_binary_handler(bprm,regs);
41745 if (retval < 0)
41746 - goto out;
41747 + goto out_fail;
41748 +#ifdef CONFIG_GRKERNSEC
41749 + if (old_exec_file)
41750 + fput(old_exec_file);
41751 +#endif
41752
41753 /* execve succeeded */
41754 current->fs->in_exec = 0;
41755 @@ -1545,6 +1619,14 @@ static int do_execve_common(const char *filename,
41756 put_files_struct(displaced);
41757 return retval;
41758
41759 +out_fail:
41760 +#ifdef CONFIG_GRKERNSEC
41761 + current->acl = old_acl;
41762 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41763 + fput(current->exec_file);
41764 + current->exec_file = old_exec_file;
41765 +#endif
41766 +
41767 out:
41768 if (bprm->mm) {
41769 acct_arg_size(bprm, 0);
41770 @@ -1618,7 +1700,7 @@ static int expand_corename(struct core_name *cn)
41771 {
41772 char *old_corename = cn->corename;
41773
41774 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41775 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41776 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41777
41778 if (!cn->corename) {
41779 @@ -1715,7 +1797,7 @@ static int format_corename(struct core_name *cn, long signr)
41780 int pid_in_pattern = 0;
41781 int err = 0;
41782
41783 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41784 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41785 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41786 cn->used = 0;
41787
41788 @@ -1812,6 +1894,218 @@ out:
41789 return ispipe;
41790 }
41791
41792 +int pax_check_flags(unsigned long *flags)
41793 +{
41794 + int retval = 0;
41795 +
41796 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41797 + if (*flags & MF_PAX_SEGMEXEC)
41798 + {
41799 + *flags &= ~MF_PAX_SEGMEXEC;
41800 + retval = -EINVAL;
41801 + }
41802 +#endif
41803 +
41804 + if ((*flags & MF_PAX_PAGEEXEC)
41805 +
41806 +#ifdef CONFIG_PAX_PAGEEXEC
41807 + && (*flags & MF_PAX_SEGMEXEC)
41808 +#endif
41809 +
41810 + )
41811 + {
41812 + *flags &= ~MF_PAX_PAGEEXEC;
41813 + retval = -EINVAL;
41814 + }
41815 +
41816 + if ((*flags & MF_PAX_MPROTECT)
41817 +
41818 +#ifdef CONFIG_PAX_MPROTECT
41819 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41820 +#endif
41821 +
41822 + )
41823 + {
41824 + *flags &= ~MF_PAX_MPROTECT;
41825 + retval = -EINVAL;
41826 + }
41827 +
41828 + if ((*flags & MF_PAX_EMUTRAMP)
41829 +
41830 +#ifdef CONFIG_PAX_EMUTRAMP
41831 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41832 +#endif
41833 +
41834 + )
41835 + {
41836 + *flags &= ~MF_PAX_EMUTRAMP;
41837 + retval = -EINVAL;
41838 + }
41839 +
41840 + return retval;
41841 +}
41842 +
41843 +EXPORT_SYMBOL(pax_check_flags);
41844 +
41845 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41846 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41847 +{
41848 + struct task_struct *tsk = current;
41849 + struct mm_struct *mm = current->mm;
41850 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41851 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41852 + char *path_exec = NULL;
41853 + char *path_fault = NULL;
41854 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41855 +
41856 + if (buffer_exec && buffer_fault) {
41857 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41858 +
41859 + down_read(&mm->mmap_sem);
41860 + vma = mm->mmap;
41861 + while (vma && (!vma_exec || !vma_fault)) {
41862 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41863 + vma_exec = vma;
41864 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41865 + vma_fault = vma;
41866 + vma = vma->vm_next;
41867 + }
41868 + if (vma_exec) {
41869 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41870 + if (IS_ERR(path_exec))
41871 + path_exec = "<path too long>";
41872 + else {
41873 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41874 + if (path_exec) {
41875 + *path_exec = 0;
41876 + path_exec = buffer_exec;
41877 + } else
41878 + path_exec = "<path too long>";
41879 + }
41880 + }
41881 + if (vma_fault) {
41882 + start = vma_fault->vm_start;
41883 + end = vma_fault->vm_end;
41884 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41885 + if (vma_fault->vm_file) {
41886 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41887 + if (IS_ERR(path_fault))
41888 + path_fault = "<path too long>";
41889 + else {
41890 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41891 + if (path_fault) {
41892 + *path_fault = 0;
41893 + path_fault = buffer_fault;
41894 + } else
41895 + path_fault = "<path too long>";
41896 + }
41897 + } else
41898 + path_fault = "<anonymous mapping>";
41899 + }
41900 + up_read(&mm->mmap_sem);
41901 + }
41902 + if (tsk->signal->curr_ip)
41903 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41904 + else
41905 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41906 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41907 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41908 + task_uid(tsk), task_euid(tsk), pc, sp);
41909 + free_page((unsigned long)buffer_exec);
41910 + free_page((unsigned long)buffer_fault);
41911 + pax_report_insns(regs, pc, sp);
41912 + do_coredump(SIGKILL, SIGKILL, regs);
41913 +}
41914 +#endif
41915 +
41916 +#ifdef CONFIG_PAX_REFCOUNT
41917 +void pax_report_refcount_overflow(struct pt_regs *regs)
41918 +{
41919 + if (current->signal->curr_ip)
41920 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41921 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41922 + else
41923 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41924 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41925 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41926 + show_regs(regs);
41927 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41928 +}
41929 +#endif
41930 +
41931 +#ifdef CONFIG_PAX_USERCOPY
41932 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41933 +int object_is_on_stack(const void *obj, unsigned long len)
41934 +{
41935 + const void * const stack = task_stack_page(current);
41936 + const void * const stackend = stack + THREAD_SIZE;
41937 +
41938 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41939 + const void *frame = NULL;
41940 + const void *oldframe;
41941 +#endif
41942 +
41943 + if (obj + len < obj)
41944 + return -1;
41945 +
41946 + if (obj + len <= stack || stackend <= obj)
41947 + return 0;
41948 +
41949 + if (obj < stack || stackend < obj + len)
41950 + return -1;
41951 +
41952 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41953 + oldframe = __builtin_frame_address(1);
41954 + if (oldframe)
41955 + frame = __builtin_frame_address(2);
41956 + /*
41957 + low ----------------------------------------------> high
41958 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41959 + ^----------------^
41960 + allow copies only within here
41961 + */
41962 + while (stack <= frame && frame < stackend) {
41963 + /* if obj + len extends past the last frame, this
41964 + check won't pass and the next frame will be 0,
41965 + causing us to bail out and correctly report
41966 + the copy as invalid
41967 + */
41968 + if (obj + len <= frame)
41969 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41970 + oldframe = frame;
41971 + frame = *(const void * const *)frame;
41972 + }
41973 + return -1;
41974 +#else
41975 + return 1;
41976 +#endif
41977 +}
41978 +
41979 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41980 +{
41981 + if (current->signal->curr_ip)
41982 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41983 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41984 + else
41985 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41986 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41987 + dump_stack();
41988 + gr_handle_kernel_exploit();
41989 + do_group_exit(SIGKILL);
41990 +}
41991 +#endif
41992 +
41993 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41994 +void pax_track_stack(void)
41995 +{
41996 + unsigned long sp = (unsigned long)&sp;
41997 + if (sp < current_thread_info()->lowest_stack &&
41998 + sp > (unsigned long)task_stack_page(current))
41999 + current_thread_info()->lowest_stack = sp;
42000 +}
42001 +EXPORT_SYMBOL(pax_track_stack);
42002 +#endif
42003 +
42004 static int zap_process(struct task_struct *start, int exit_code)
42005 {
42006 struct task_struct *t;
42007 @@ -2023,17 +2317,17 @@ static void wait_for_dump_helpers(struct file *file)
42008 pipe = file->f_path.dentry->d_inode->i_pipe;
42009
42010 pipe_lock(pipe);
42011 - pipe->readers++;
42012 - pipe->writers--;
42013 + atomic_inc(&pipe->readers);
42014 + atomic_dec(&pipe->writers);
42015
42016 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42017 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42018 wake_up_interruptible_sync(&pipe->wait);
42019 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42020 pipe_wait(pipe);
42021 }
42022
42023 - pipe->readers--;
42024 - pipe->writers++;
42025 + atomic_dec(&pipe->readers);
42026 + atomic_inc(&pipe->writers);
42027 pipe_unlock(pipe);
42028
42029 }
42030 @@ -2094,7 +2388,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42031 int retval = 0;
42032 int flag = 0;
42033 int ispipe;
42034 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42035 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42036 struct coredump_params cprm = {
42037 .signr = signr,
42038 .regs = regs,
42039 @@ -2109,6 +2403,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42040
42041 audit_core_dumps(signr);
42042
42043 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42044 + gr_handle_brute_attach(current, cprm.mm_flags);
42045 +
42046 binfmt = mm->binfmt;
42047 if (!binfmt || !binfmt->core_dump)
42048 goto fail;
42049 @@ -2176,7 +2473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42050 }
42051 cprm.limit = RLIM_INFINITY;
42052
42053 - dump_count = atomic_inc_return(&core_dump_count);
42054 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42055 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42056 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42057 task_tgid_vnr(current), current->comm);
42058 @@ -2203,6 +2500,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42059 } else {
42060 struct inode *inode;
42061
42062 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42063 +
42064 if (cprm.limit < binfmt->min_coredump)
42065 goto fail_unlock;
42066
42067 @@ -2246,7 +2545,7 @@ close_fail:
42068 filp_close(cprm.file, NULL);
42069 fail_dropcount:
42070 if (ispipe)
42071 - atomic_dec(&core_dump_count);
42072 + atomic_dec_unchecked(&core_dump_count);
42073 fail_unlock:
42074 kfree(cn.corename);
42075 fail_corename:
42076 @@ -2265,7 +2564,7 @@ fail:
42077 */
42078 int dump_write(struct file *file, const void *addr, int nr)
42079 {
42080 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42081 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42082 }
42083 EXPORT_SYMBOL(dump_write);
42084
42085 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42086 index a8cbe1b..fed04cb 100644
42087 --- a/fs/ext2/balloc.c
42088 +++ b/fs/ext2/balloc.c
42089 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42090
42091 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42092 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42093 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42094 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42095 sbi->s_resuid != current_fsuid() &&
42096 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42097 return 0;
42098 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42099 index a203892..4e64db5 100644
42100 --- a/fs/ext3/balloc.c
42101 +++ b/fs/ext3/balloc.c
42102 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42103
42104 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42105 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42106 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42107 + if (free_blocks < root_blocks + 1 &&
42108 !use_reservation && sbi->s_resuid != current_fsuid() &&
42109 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42110 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42111 + !capable_nolog(CAP_SYS_RESOURCE)) {
42112 return 0;
42113 }
42114 return 1;
42115 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42116 index 12ccacd..a6035fce0 100644
42117 --- a/fs/ext4/balloc.c
42118 +++ b/fs/ext4/balloc.c
42119 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42120 /* Hm, nope. Are (enough) root reserved clusters available? */
42121 if (sbi->s_resuid == current_fsuid() ||
42122 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42123 - capable(CAP_SYS_RESOURCE) ||
42124 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42125 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42126 + capable_nolog(CAP_SYS_RESOURCE)) {
42127
42128 if (free_clusters >= (nclusters + dirty_clusters))
42129 return 1;
42130 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42131 index 5b0e26a..0aa002d 100644
42132 --- a/fs/ext4/ext4.h
42133 +++ b/fs/ext4/ext4.h
42134 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42135 unsigned long s_mb_last_start;
42136
42137 /* stats for buddy allocator */
42138 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42139 - atomic_t s_bal_success; /* we found long enough chunks */
42140 - atomic_t s_bal_allocated; /* in blocks */
42141 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42142 - atomic_t s_bal_goals; /* goal hits */
42143 - atomic_t s_bal_breaks; /* too long searches */
42144 - atomic_t s_bal_2orders; /* 2^order hits */
42145 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42146 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42147 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42148 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42149 + atomic_unchecked_t s_bal_goals; /* goal hits */
42150 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42151 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42152 spinlock_t s_bal_lock;
42153 unsigned long s_mb_buddies_generated;
42154 unsigned long long s_mb_generation_time;
42155 - atomic_t s_mb_lost_chunks;
42156 - atomic_t s_mb_preallocated;
42157 - atomic_t s_mb_discarded;
42158 + atomic_unchecked_t s_mb_lost_chunks;
42159 + atomic_unchecked_t s_mb_preallocated;
42160 + atomic_unchecked_t s_mb_discarded;
42161 atomic_t s_lock_busy;
42162
42163 /* locality groups */
42164 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42165 index e2d8be8..c7f0ce9 100644
42166 --- a/fs/ext4/mballoc.c
42167 +++ b/fs/ext4/mballoc.c
42168 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42169 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42170
42171 if (EXT4_SB(sb)->s_mb_stats)
42172 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42173 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42174
42175 break;
42176 }
42177 @@ -2088,7 +2088,7 @@ repeat:
42178 ac->ac_status = AC_STATUS_CONTINUE;
42179 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42180 cr = 3;
42181 - atomic_inc(&sbi->s_mb_lost_chunks);
42182 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42183 goto repeat;
42184 }
42185 }
42186 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42187 if (sbi->s_mb_stats) {
42188 ext4_msg(sb, KERN_INFO,
42189 "mballoc: %u blocks %u reqs (%u success)",
42190 - atomic_read(&sbi->s_bal_allocated),
42191 - atomic_read(&sbi->s_bal_reqs),
42192 - atomic_read(&sbi->s_bal_success));
42193 + atomic_read_unchecked(&sbi->s_bal_allocated),
42194 + atomic_read_unchecked(&sbi->s_bal_reqs),
42195 + atomic_read_unchecked(&sbi->s_bal_success));
42196 ext4_msg(sb, KERN_INFO,
42197 "mballoc: %u extents scanned, %u goal hits, "
42198 "%u 2^N hits, %u breaks, %u lost",
42199 - atomic_read(&sbi->s_bal_ex_scanned),
42200 - atomic_read(&sbi->s_bal_goals),
42201 - atomic_read(&sbi->s_bal_2orders),
42202 - atomic_read(&sbi->s_bal_breaks),
42203 - atomic_read(&sbi->s_mb_lost_chunks));
42204 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42205 + atomic_read_unchecked(&sbi->s_bal_goals),
42206 + atomic_read_unchecked(&sbi->s_bal_2orders),
42207 + atomic_read_unchecked(&sbi->s_bal_breaks),
42208 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42209 ext4_msg(sb, KERN_INFO,
42210 "mballoc: %lu generated and it took %Lu",
42211 sbi->s_mb_buddies_generated,
42212 sbi->s_mb_generation_time);
42213 ext4_msg(sb, KERN_INFO,
42214 "mballoc: %u preallocated, %u discarded",
42215 - atomic_read(&sbi->s_mb_preallocated),
42216 - atomic_read(&sbi->s_mb_discarded));
42217 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42218 + atomic_read_unchecked(&sbi->s_mb_discarded));
42219 }
42220
42221 free_percpu(sbi->s_locality_groups);
42222 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42223 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42224
42225 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42226 - atomic_inc(&sbi->s_bal_reqs);
42227 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42228 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42229 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42230 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42231 - atomic_inc(&sbi->s_bal_success);
42232 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42233 + atomic_inc_unchecked(&sbi->s_bal_success);
42234 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42235 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42236 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42237 - atomic_inc(&sbi->s_bal_goals);
42238 + atomic_inc_unchecked(&sbi->s_bal_goals);
42239 if (ac->ac_found > sbi->s_mb_max_to_scan)
42240 - atomic_inc(&sbi->s_bal_breaks);
42241 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42242 }
42243
42244 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42245 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42246 trace_ext4_mb_new_inode_pa(ac, pa);
42247
42248 ext4_mb_use_inode_pa(ac, pa);
42249 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42250 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42251
42252 ei = EXT4_I(ac->ac_inode);
42253 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42254 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42255 trace_ext4_mb_new_group_pa(ac, pa);
42256
42257 ext4_mb_use_group_pa(ac, pa);
42258 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42259 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42260
42261 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42262 lg = ac->ac_lg;
42263 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42264 * from the bitmap and continue.
42265 */
42266 }
42267 - atomic_add(free, &sbi->s_mb_discarded);
42268 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42269
42270 return err;
42271 }
42272 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42273 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42274 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42275 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42276 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42277 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42278 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42279
42280 return 0;
42281 diff --git a/fs/fcntl.c b/fs/fcntl.c
42282 index 22764c7..86372c9 100644
42283 --- a/fs/fcntl.c
42284 +++ b/fs/fcntl.c
42285 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42286 if (err)
42287 return err;
42288
42289 + if (gr_handle_chroot_fowner(pid, type))
42290 + return -ENOENT;
42291 + if (gr_check_protected_task_fowner(pid, type))
42292 + return -EACCES;
42293 +
42294 f_modown(filp, pid, type, force);
42295 return 0;
42296 }
42297 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42298
42299 static int f_setown_ex(struct file *filp, unsigned long arg)
42300 {
42301 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42302 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42303 struct f_owner_ex owner;
42304 struct pid *pid;
42305 int type;
42306 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42307
42308 static int f_getown_ex(struct file *filp, unsigned long arg)
42309 {
42310 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42311 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42312 struct f_owner_ex owner;
42313 int ret = 0;
42314
42315 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42316 switch (cmd) {
42317 case F_DUPFD:
42318 case F_DUPFD_CLOEXEC:
42319 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42320 if (arg >= rlimit(RLIMIT_NOFILE))
42321 break;
42322 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42323 diff --git a/fs/fifo.c b/fs/fifo.c
42324 index b1a524d..4ee270e 100644
42325 --- a/fs/fifo.c
42326 +++ b/fs/fifo.c
42327 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42328 */
42329 filp->f_op = &read_pipefifo_fops;
42330 pipe->r_counter++;
42331 - if (pipe->readers++ == 0)
42332 + if (atomic_inc_return(&pipe->readers) == 1)
42333 wake_up_partner(inode);
42334
42335 - if (!pipe->writers) {
42336 + if (!atomic_read(&pipe->writers)) {
42337 if ((filp->f_flags & O_NONBLOCK)) {
42338 /* suppress POLLHUP until we have
42339 * seen a writer */
42340 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42341 * errno=ENXIO when there is no process reading the FIFO.
42342 */
42343 ret = -ENXIO;
42344 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42345 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42346 goto err;
42347
42348 filp->f_op = &write_pipefifo_fops;
42349 pipe->w_counter++;
42350 - if (!pipe->writers++)
42351 + if (atomic_inc_return(&pipe->writers) == 1)
42352 wake_up_partner(inode);
42353
42354 - if (!pipe->readers) {
42355 + if (!atomic_read(&pipe->readers)) {
42356 wait_for_partner(inode, &pipe->r_counter);
42357 if (signal_pending(current))
42358 goto err_wr;
42359 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42360 */
42361 filp->f_op = &rdwr_pipefifo_fops;
42362
42363 - pipe->readers++;
42364 - pipe->writers++;
42365 + atomic_inc(&pipe->readers);
42366 + atomic_inc(&pipe->writers);
42367 pipe->r_counter++;
42368 pipe->w_counter++;
42369 - if (pipe->readers == 1 || pipe->writers == 1)
42370 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42371 wake_up_partner(inode);
42372 break;
42373
42374 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42375 return 0;
42376
42377 err_rd:
42378 - if (!--pipe->readers)
42379 + if (atomic_dec_and_test(&pipe->readers))
42380 wake_up_interruptible(&pipe->wait);
42381 ret = -ERESTARTSYS;
42382 goto err;
42383
42384 err_wr:
42385 - if (!--pipe->writers)
42386 + if (atomic_dec_and_test(&pipe->writers))
42387 wake_up_interruptible(&pipe->wait);
42388 ret = -ERESTARTSYS;
42389 goto err;
42390
42391 err:
42392 - if (!pipe->readers && !pipe->writers)
42393 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42394 free_pipe_info(inode);
42395
42396 err_nocleanup:
42397 diff --git a/fs/file.c b/fs/file.c
42398 index 4c6992d..104cdea 100644
42399 --- a/fs/file.c
42400 +++ b/fs/file.c
42401 @@ -15,6 +15,7 @@
42402 #include <linux/slab.h>
42403 #include <linux/vmalloc.h>
42404 #include <linux/file.h>
42405 +#include <linux/security.h>
42406 #include <linux/fdtable.h>
42407 #include <linux/bitops.h>
42408 #include <linux/interrupt.h>
42409 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42410 * N.B. For clone tasks sharing a files structure, this test
42411 * will limit the total number of files that can be opened.
42412 */
42413 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42414 if (nr >= rlimit(RLIMIT_NOFILE))
42415 return -EMFILE;
42416
42417 diff --git a/fs/filesystems.c b/fs/filesystems.c
42418 index 0845f84..7b4ebef 100644
42419 --- a/fs/filesystems.c
42420 +++ b/fs/filesystems.c
42421 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42422 int len = dot ? dot - name : strlen(name);
42423
42424 fs = __get_fs_type(name, len);
42425 +
42426 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42427 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42428 +#else
42429 if (!fs && (request_module("%.*s", len, name) == 0))
42430 +#endif
42431 fs = __get_fs_type(name, len);
42432
42433 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42434 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42435 index 78b519c..212c0d0 100644
42436 --- a/fs/fs_struct.c
42437 +++ b/fs/fs_struct.c
42438 @@ -4,6 +4,7 @@
42439 #include <linux/path.h>
42440 #include <linux/slab.h>
42441 #include <linux/fs_struct.h>
42442 +#include <linux/grsecurity.h>
42443 #include "internal.h"
42444
42445 static inline void path_get_longterm(struct path *path)
42446 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42447 old_root = fs->root;
42448 fs->root = *path;
42449 path_get_longterm(path);
42450 + gr_set_chroot_entries(current, path);
42451 write_seqcount_end(&fs->seq);
42452 spin_unlock(&fs->lock);
42453 if (old_root.dentry)
42454 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42455 && fs->root.mnt == old_root->mnt) {
42456 path_get_longterm(new_root);
42457 fs->root = *new_root;
42458 + gr_set_chroot_entries(p, new_root);
42459 count++;
42460 }
42461 if (fs->pwd.dentry == old_root->dentry
42462 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42463 spin_lock(&fs->lock);
42464 write_seqcount_begin(&fs->seq);
42465 tsk->fs = NULL;
42466 - kill = !--fs->users;
42467 + gr_clear_chroot_entries(tsk);
42468 + kill = !atomic_dec_return(&fs->users);
42469 write_seqcount_end(&fs->seq);
42470 spin_unlock(&fs->lock);
42471 task_unlock(tsk);
42472 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42473 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42474 /* We don't need to lock fs - think why ;-) */
42475 if (fs) {
42476 - fs->users = 1;
42477 + atomic_set(&fs->users, 1);
42478 fs->in_exec = 0;
42479 spin_lock_init(&fs->lock);
42480 seqcount_init(&fs->seq);
42481 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42482 spin_lock(&old->lock);
42483 fs->root = old->root;
42484 path_get_longterm(&fs->root);
42485 + /* instead of calling gr_set_chroot_entries here,
42486 + we call it from every caller of this function
42487 + */
42488 fs->pwd = old->pwd;
42489 path_get_longterm(&fs->pwd);
42490 spin_unlock(&old->lock);
42491 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42492
42493 task_lock(current);
42494 spin_lock(&fs->lock);
42495 - kill = !--fs->users;
42496 + kill = !atomic_dec_return(&fs->users);
42497 current->fs = new_fs;
42498 + gr_set_chroot_entries(current, &new_fs->root);
42499 spin_unlock(&fs->lock);
42500 task_unlock(current);
42501
42502 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42503
42504 /* to be mentioned only in INIT_TASK */
42505 struct fs_struct init_fs = {
42506 - .users = 1,
42507 + .users = ATOMIC_INIT(1),
42508 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42509 .seq = SEQCNT_ZERO,
42510 .umask = 0022,
42511 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42512 task_lock(current);
42513
42514 spin_lock(&init_fs.lock);
42515 - init_fs.users++;
42516 + atomic_inc(&init_fs.users);
42517 spin_unlock(&init_fs.lock);
42518
42519 spin_lock(&fs->lock);
42520 current->fs = &init_fs;
42521 - kill = !--fs->users;
42522 + gr_set_chroot_entries(current, &current->fs->root);
42523 + kill = !atomic_dec_return(&fs->users);
42524 spin_unlock(&fs->lock);
42525
42526 task_unlock(current);
42527 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42528 index 9905350..02eaec4 100644
42529 --- a/fs/fscache/cookie.c
42530 +++ b/fs/fscache/cookie.c
42531 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42532 parent ? (char *) parent->def->name : "<no-parent>",
42533 def->name, netfs_data);
42534
42535 - fscache_stat(&fscache_n_acquires);
42536 + fscache_stat_unchecked(&fscache_n_acquires);
42537
42538 /* if there's no parent cookie, then we don't create one here either */
42539 if (!parent) {
42540 - fscache_stat(&fscache_n_acquires_null);
42541 + fscache_stat_unchecked(&fscache_n_acquires_null);
42542 _leave(" [no parent]");
42543 return NULL;
42544 }
42545 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42546 /* allocate and initialise a cookie */
42547 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42548 if (!cookie) {
42549 - fscache_stat(&fscache_n_acquires_oom);
42550 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42551 _leave(" [ENOMEM]");
42552 return NULL;
42553 }
42554 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42555
42556 switch (cookie->def->type) {
42557 case FSCACHE_COOKIE_TYPE_INDEX:
42558 - fscache_stat(&fscache_n_cookie_index);
42559 + fscache_stat_unchecked(&fscache_n_cookie_index);
42560 break;
42561 case FSCACHE_COOKIE_TYPE_DATAFILE:
42562 - fscache_stat(&fscache_n_cookie_data);
42563 + fscache_stat_unchecked(&fscache_n_cookie_data);
42564 break;
42565 default:
42566 - fscache_stat(&fscache_n_cookie_special);
42567 + fscache_stat_unchecked(&fscache_n_cookie_special);
42568 break;
42569 }
42570
42571 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42572 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42573 atomic_dec(&parent->n_children);
42574 __fscache_cookie_put(cookie);
42575 - fscache_stat(&fscache_n_acquires_nobufs);
42576 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42577 _leave(" = NULL");
42578 return NULL;
42579 }
42580 }
42581
42582 - fscache_stat(&fscache_n_acquires_ok);
42583 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42584 _leave(" = %p", cookie);
42585 return cookie;
42586 }
42587 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42588 cache = fscache_select_cache_for_object(cookie->parent);
42589 if (!cache) {
42590 up_read(&fscache_addremove_sem);
42591 - fscache_stat(&fscache_n_acquires_no_cache);
42592 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42593 _leave(" = -ENOMEDIUM [no cache]");
42594 return -ENOMEDIUM;
42595 }
42596 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42597 object = cache->ops->alloc_object(cache, cookie);
42598 fscache_stat_d(&fscache_n_cop_alloc_object);
42599 if (IS_ERR(object)) {
42600 - fscache_stat(&fscache_n_object_no_alloc);
42601 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42602 ret = PTR_ERR(object);
42603 goto error;
42604 }
42605
42606 - fscache_stat(&fscache_n_object_alloc);
42607 + fscache_stat_unchecked(&fscache_n_object_alloc);
42608
42609 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42610
42611 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42612 struct fscache_object *object;
42613 struct hlist_node *_p;
42614
42615 - fscache_stat(&fscache_n_updates);
42616 + fscache_stat_unchecked(&fscache_n_updates);
42617
42618 if (!cookie) {
42619 - fscache_stat(&fscache_n_updates_null);
42620 + fscache_stat_unchecked(&fscache_n_updates_null);
42621 _leave(" [no cookie]");
42622 return;
42623 }
42624 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42625 struct fscache_object *object;
42626 unsigned long event;
42627
42628 - fscache_stat(&fscache_n_relinquishes);
42629 + fscache_stat_unchecked(&fscache_n_relinquishes);
42630 if (retire)
42631 - fscache_stat(&fscache_n_relinquishes_retire);
42632 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42633
42634 if (!cookie) {
42635 - fscache_stat(&fscache_n_relinquishes_null);
42636 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42637 _leave(" [no cookie]");
42638 return;
42639 }
42640 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42641
42642 /* wait for the cookie to finish being instantiated (or to fail) */
42643 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42644 - fscache_stat(&fscache_n_relinquishes_waitcrt);
42645 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42646 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42647 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42648 }
42649 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42650 index f6aad48..88dcf26 100644
42651 --- a/fs/fscache/internal.h
42652 +++ b/fs/fscache/internal.h
42653 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42654 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42655 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42656
42657 -extern atomic_t fscache_n_op_pend;
42658 -extern atomic_t fscache_n_op_run;
42659 -extern atomic_t fscache_n_op_enqueue;
42660 -extern atomic_t fscache_n_op_deferred_release;
42661 -extern atomic_t fscache_n_op_release;
42662 -extern atomic_t fscache_n_op_gc;
42663 -extern atomic_t fscache_n_op_cancelled;
42664 -extern atomic_t fscache_n_op_rejected;
42665 +extern atomic_unchecked_t fscache_n_op_pend;
42666 +extern atomic_unchecked_t fscache_n_op_run;
42667 +extern atomic_unchecked_t fscache_n_op_enqueue;
42668 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42669 +extern atomic_unchecked_t fscache_n_op_release;
42670 +extern atomic_unchecked_t fscache_n_op_gc;
42671 +extern atomic_unchecked_t fscache_n_op_cancelled;
42672 +extern atomic_unchecked_t fscache_n_op_rejected;
42673
42674 -extern atomic_t fscache_n_attr_changed;
42675 -extern atomic_t fscache_n_attr_changed_ok;
42676 -extern atomic_t fscache_n_attr_changed_nobufs;
42677 -extern atomic_t fscache_n_attr_changed_nomem;
42678 -extern atomic_t fscache_n_attr_changed_calls;
42679 +extern atomic_unchecked_t fscache_n_attr_changed;
42680 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42681 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42682 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42683 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42684
42685 -extern atomic_t fscache_n_allocs;
42686 -extern atomic_t fscache_n_allocs_ok;
42687 -extern atomic_t fscache_n_allocs_wait;
42688 -extern atomic_t fscache_n_allocs_nobufs;
42689 -extern atomic_t fscache_n_allocs_intr;
42690 -extern atomic_t fscache_n_allocs_object_dead;
42691 -extern atomic_t fscache_n_alloc_ops;
42692 -extern atomic_t fscache_n_alloc_op_waits;
42693 +extern atomic_unchecked_t fscache_n_allocs;
42694 +extern atomic_unchecked_t fscache_n_allocs_ok;
42695 +extern atomic_unchecked_t fscache_n_allocs_wait;
42696 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42697 +extern atomic_unchecked_t fscache_n_allocs_intr;
42698 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42699 +extern atomic_unchecked_t fscache_n_alloc_ops;
42700 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42701
42702 -extern atomic_t fscache_n_retrievals;
42703 -extern atomic_t fscache_n_retrievals_ok;
42704 -extern atomic_t fscache_n_retrievals_wait;
42705 -extern atomic_t fscache_n_retrievals_nodata;
42706 -extern atomic_t fscache_n_retrievals_nobufs;
42707 -extern atomic_t fscache_n_retrievals_intr;
42708 -extern atomic_t fscache_n_retrievals_nomem;
42709 -extern atomic_t fscache_n_retrievals_object_dead;
42710 -extern atomic_t fscache_n_retrieval_ops;
42711 -extern atomic_t fscache_n_retrieval_op_waits;
42712 +extern atomic_unchecked_t fscache_n_retrievals;
42713 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42714 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42715 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42716 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42717 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42718 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42719 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42720 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42721 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42722
42723 -extern atomic_t fscache_n_stores;
42724 -extern atomic_t fscache_n_stores_ok;
42725 -extern atomic_t fscache_n_stores_again;
42726 -extern atomic_t fscache_n_stores_nobufs;
42727 -extern atomic_t fscache_n_stores_oom;
42728 -extern atomic_t fscache_n_store_ops;
42729 -extern atomic_t fscache_n_store_calls;
42730 -extern atomic_t fscache_n_store_pages;
42731 -extern atomic_t fscache_n_store_radix_deletes;
42732 -extern atomic_t fscache_n_store_pages_over_limit;
42733 +extern atomic_unchecked_t fscache_n_stores;
42734 +extern atomic_unchecked_t fscache_n_stores_ok;
42735 +extern atomic_unchecked_t fscache_n_stores_again;
42736 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42737 +extern atomic_unchecked_t fscache_n_stores_oom;
42738 +extern atomic_unchecked_t fscache_n_store_ops;
42739 +extern atomic_unchecked_t fscache_n_store_calls;
42740 +extern atomic_unchecked_t fscache_n_store_pages;
42741 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42742 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42743
42744 -extern atomic_t fscache_n_store_vmscan_not_storing;
42745 -extern atomic_t fscache_n_store_vmscan_gone;
42746 -extern atomic_t fscache_n_store_vmscan_busy;
42747 -extern atomic_t fscache_n_store_vmscan_cancelled;
42748 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42749 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42750 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42751 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42752
42753 -extern atomic_t fscache_n_marks;
42754 -extern atomic_t fscache_n_uncaches;
42755 +extern atomic_unchecked_t fscache_n_marks;
42756 +extern atomic_unchecked_t fscache_n_uncaches;
42757
42758 -extern atomic_t fscache_n_acquires;
42759 -extern atomic_t fscache_n_acquires_null;
42760 -extern atomic_t fscache_n_acquires_no_cache;
42761 -extern atomic_t fscache_n_acquires_ok;
42762 -extern atomic_t fscache_n_acquires_nobufs;
42763 -extern atomic_t fscache_n_acquires_oom;
42764 +extern atomic_unchecked_t fscache_n_acquires;
42765 +extern atomic_unchecked_t fscache_n_acquires_null;
42766 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42767 +extern atomic_unchecked_t fscache_n_acquires_ok;
42768 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42769 +extern atomic_unchecked_t fscache_n_acquires_oom;
42770
42771 -extern atomic_t fscache_n_updates;
42772 -extern atomic_t fscache_n_updates_null;
42773 -extern atomic_t fscache_n_updates_run;
42774 +extern atomic_unchecked_t fscache_n_updates;
42775 +extern atomic_unchecked_t fscache_n_updates_null;
42776 +extern atomic_unchecked_t fscache_n_updates_run;
42777
42778 -extern atomic_t fscache_n_relinquishes;
42779 -extern atomic_t fscache_n_relinquishes_null;
42780 -extern atomic_t fscache_n_relinquishes_waitcrt;
42781 -extern atomic_t fscache_n_relinquishes_retire;
42782 +extern atomic_unchecked_t fscache_n_relinquishes;
42783 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42784 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42785 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42786
42787 -extern atomic_t fscache_n_cookie_index;
42788 -extern atomic_t fscache_n_cookie_data;
42789 -extern atomic_t fscache_n_cookie_special;
42790 +extern atomic_unchecked_t fscache_n_cookie_index;
42791 +extern atomic_unchecked_t fscache_n_cookie_data;
42792 +extern atomic_unchecked_t fscache_n_cookie_special;
42793
42794 -extern atomic_t fscache_n_object_alloc;
42795 -extern atomic_t fscache_n_object_no_alloc;
42796 -extern atomic_t fscache_n_object_lookups;
42797 -extern atomic_t fscache_n_object_lookups_negative;
42798 -extern atomic_t fscache_n_object_lookups_positive;
42799 -extern atomic_t fscache_n_object_lookups_timed_out;
42800 -extern atomic_t fscache_n_object_created;
42801 -extern atomic_t fscache_n_object_avail;
42802 -extern atomic_t fscache_n_object_dead;
42803 +extern atomic_unchecked_t fscache_n_object_alloc;
42804 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42805 +extern atomic_unchecked_t fscache_n_object_lookups;
42806 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42807 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42808 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42809 +extern atomic_unchecked_t fscache_n_object_created;
42810 +extern atomic_unchecked_t fscache_n_object_avail;
42811 +extern atomic_unchecked_t fscache_n_object_dead;
42812
42813 -extern atomic_t fscache_n_checkaux_none;
42814 -extern atomic_t fscache_n_checkaux_okay;
42815 -extern atomic_t fscache_n_checkaux_update;
42816 -extern atomic_t fscache_n_checkaux_obsolete;
42817 +extern atomic_unchecked_t fscache_n_checkaux_none;
42818 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42819 +extern atomic_unchecked_t fscache_n_checkaux_update;
42820 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42821
42822 extern atomic_t fscache_n_cop_alloc_object;
42823 extern atomic_t fscache_n_cop_lookup_object;
42824 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
42825 atomic_inc(stat);
42826 }
42827
42828 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42829 +{
42830 + atomic_inc_unchecked(stat);
42831 +}
42832 +
42833 static inline void fscache_stat_d(atomic_t *stat)
42834 {
42835 atomic_dec(stat);
42836 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
42837
42838 #define __fscache_stat(stat) (NULL)
42839 #define fscache_stat(stat) do {} while (0)
42840 +#define fscache_stat_unchecked(stat) do {} while (0)
42841 #define fscache_stat_d(stat) do {} while (0)
42842 #endif
42843
42844 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
42845 index b6b897c..0ffff9c 100644
42846 --- a/fs/fscache/object.c
42847 +++ b/fs/fscache/object.c
42848 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42849 /* update the object metadata on disk */
42850 case FSCACHE_OBJECT_UPDATING:
42851 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42852 - fscache_stat(&fscache_n_updates_run);
42853 + fscache_stat_unchecked(&fscache_n_updates_run);
42854 fscache_stat(&fscache_n_cop_update_object);
42855 object->cache->ops->update_object(object);
42856 fscache_stat_d(&fscache_n_cop_update_object);
42857 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42858 spin_lock(&object->lock);
42859 object->state = FSCACHE_OBJECT_DEAD;
42860 spin_unlock(&object->lock);
42861 - fscache_stat(&fscache_n_object_dead);
42862 + fscache_stat_unchecked(&fscache_n_object_dead);
42863 goto terminal_transit;
42864
42865 /* handle the parent cache of this object being withdrawn from
42866 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42867 spin_lock(&object->lock);
42868 object->state = FSCACHE_OBJECT_DEAD;
42869 spin_unlock(&object->lock);
42870 - fscache_stat(&fscache_n_object_dead);
42871 + fscache_stat_unchecked(&fscache_n_object_dead);
42872 goto terminal_transit;
42873
42874 /* complain about the object being woken up once it is
42875 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
42876 parent->cookie->def->name, cookie->def->name,
42877 object->cache->tag->name);
42878
42879 - fscache_stat(&fscache_n_object_lookups);
42880 + fscache_stat_unchecked(&fscache_n_object_lookups);
42881 fscache_stat(&fscache_n_cop_lookup_object);
42882 ret = object->cache->ops->lookup_object(object);
42883 fscache_stat_d(&fscache_n_cop_lookup_object);
42884 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
42885 if (ret == -ETIMEDOUT) {
42886 /* probably stuck behind another object, so move this one to
42887 * the back of the queue */
42888 - fscache_stat(&fscache_n_object_lookups_timed_out);
42889 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42890 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42891 }
42892
42893 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
42894
42895 spin_lock(&object->lock);
42896 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42897 - fscache_stat(&fscache_n_object_lookups_negative);
42898 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42899
42900 /* transit here to allow write requests to begin stacking up
42901 * and read requests to begin returning ENODATA */
42902 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
42903 * result, in which case there may be data available */
42904 spin_lock(&object->lock);
42905 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42906 - fscache_stat(&fscache_n_object_lookups_positive);
42907 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42908
42909 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42910
42911 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
42912 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42913 } else {
42914 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42915 - fscache_stat(&fscache_n_object_created);
42916 + fscache_stat_unchecked(&fscache_n_object_created);
42917
42918 object->state = FSCACHE_OBJECT_AVAILABLE;
42919 spin_unlock(&object->lock);
42920 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
42921 fscache_enqueue_dependents(object);
42922
42923 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42924 - fscache_stat(&fscache_n_object_avail);
42925 + fscache_stat_unchecked(&fscache_n_object_avail);
42926
42927 _leave("");
42928 }
42929 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
42930 enum fscache_checkaux result;
42931
42932 if (!object->cookie->def->check_aux) {
42933 - fscache_stat(&fscache_n_checkaux_none);
42934 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42935 return FSCACHE_CHECKAUX_OKAY;
42936 }
42937
42938 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
42939 switch (result) {
42940 /* entry okay as is */
42941 case FSCACHE_CHECKAUX_OKAY:
42942 - fscache_stat(&fscache_n_checkaux_okay);
42943 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42944 break;
42945
42946 /* entry requires update */
42947 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42948 - fscache_stat(&fscache_n_checkaux_update);
42949 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42950 break;
42951
42952 /* entry requires deletion */
42953 case FSCACHE_CHECKAUX_OBSOLETE:
42954 - fscache_stat(&fscache_n_checkaux_obsolete);
42955 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42956 break;
42957
42958 default:
42959 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
42960 index 30afdfa..2256596 100644
42961 --- a/fs/fscache/operation.c
42962 +++ b/fs/fscache/operation.c
42963 @@ -17,7 +17,7 @@
42964 #include <linux/slab.h>
42965 #include "internal.h"
42966
42967 -atomic_t fscache_op_debug_id;
42968 +atomic_unchecked_t fscache_op_debug_id;
42969 EXPORT_SYMBOL(fscache_op_debug_id);
42970
42971 /**
42972 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
42973 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42974 ASSERTCMP(atomic_read(&op->usage), >, 0);
42975
42976 - fscache_stat(&fscache_n_op_enqueue);
42977 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42978 switch (op->flags & FSCACHE_OP_TYPE) {
42979 case FSCACHE_OP_ASYNC:
42980 _debug("queue async");
42981 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
42982 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42983 if (op->processor)
42984 fscache_enqueue_operation(op);
42985 - fscache_stat(&fscache_n_op_run);
42986 + fscache_stat_unchecked(&fscache_n_op_run);
42987 }
42988
42989 /*
42990 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
42991 if (object->n_ops > 1) {
42992 atomic_inc(&op->usage);
42993 list_add_tail(&op->pend_link, &object->pending_ops);
42994 - fscache_stat(&fscache_n_op_pend);
42995 + fscache_stat_unchecked(&fscache_n_op_pend);
42996 } else if (!list_empty(&object->pending_ops)) {
42997 atomic_inc(&op->usage);
42998 list_add_tail(&op->pend_link, &object->pending_ops);
42999 - fscache_stat(&fscache_n_op_pend);
43000 + fscache_stat_unchecked(&fscache_n_op_pend);
43001 fscache_start_operations(object);
43002 } else {
43003 ASSERTCMP(object->n_in_progress, ==, 0);
43004 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43005 object->n_exclusive++; /* reads and writes must wait */
43006 atomic_inc(&op->usage);
43007 list_add_tail(&op->pend_link, &object->pending_ops);
43008 - fscache_stat(&fscache_n_op_pend);
43009 + fscache_stat_unchecked(&fscache_n_op_pend);
43010 ret = 0;
43011 } else {
43012 /* not allowed to submit ops in any other state */
43013 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43014 if (object->n_exclusive > 0) {
43015 atomic_inc(&op->usage);
43016 list_add_tail(&op->pend_link, &object->pending_ops);
43017 - fscache_stat(&fscache_n_op_pend);
43018 + fscache_stat_unchecked(&fscache_n_op_pend);
43019 } else if (!list_empty(&object->pending_ops)) {
43020 atomic_inc(&op->usage);
43021 list_add_tail(&op->pend_link, &object->pending_ops);
43022 - fscache_stat(&fscache_n_op_pend);
43023 + fscache_stat_unchecked(&fscache_n_op_pend);
43024 fscache_start_operations(object);
43025 } else {
43026 ASSERTCMP(object->n_exclusive, ==, 0);
43027 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43028 object->n_ops++;
43029 atomic_inc(&op->usage);
43030 list_add_tail(&op->pend_link, &object->pending_ops);
43031 - fscache_stat(&fscache_n_op_pend);
43032 + fscache_stat_unchecked(&fscache_n_op_pend);
43033 ret = 0;
43034 } else if (object->state == FSCACHE_OBJECT_DYING ||
43035 object->state == FSCACHE_OBJECT_LC_DYING ||
43036 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43037 - fscache_stat(&fscache_n_op_rejected);
43038 + fscache_stat_unchecked(&fscache_n_op_rejected);
43039 ret = -ENOBUFS;
43040 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43041 fscache_report_unexpected_submission(object, op, ostate);
43042 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43043
43044 ret = -EBUSY;
43045 if (!list_empty(&op->pend_link)) {
43046 - fscache_stat(&fscache_n_op_cancelled);
43047 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43048 list_del_init(&op->pend_link);
43049 object->n_ops--;
43050 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43051 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43052 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43053 BUG();
43054
43055 - fscache_stat(&fscache_n_op_release);
43056 + fscache_stat_unchecked(&fscache_n_op_release);
43057
43058 if (op->release) {
43059 op->release(op);
43060 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43061 * lock, and defer it otherwise */
43062 if (!spin_trylock(&object->lock)) {
43063 _debug("defer put");
43064 - fscache_stat(&fscache_n_op_deferred_release);
43065 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43066
43067 cache = object->cache;
43068 spin_lock(&cache->op_gc_list_lock);
43069 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43070
43071 _debug("GC DEFERRED REL OBJ%x OP%x",
43072 object->debug_id, op->debug_id);
43073 - fscache_stat(&fscache_n_op_gc);
43074 + fscache_stat_unchecked(&fscache_n_op_gc);
43075
43076 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43077
43078 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43079 index 3f7a59b..cf196cc 100644
43080 --- a/fs/fscache/page.c
43081 +++ b/fs/fscache/page.c
43082 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43083 val = radix_tree_lookup(&cookie->stores, page->index);
43084 if (!val) {
43085 rcu_read_unlock();
43086 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43087 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43088 __fscache_uncache_page(cookie, page);
43089 return true;
43090 }
43091 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43092 spin_unlock(&cookie->stores_lock);
43093
43094 if (xpage) {
43095 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43096 - fscache_stat(&fscache_n_store_radix_deletes);
43097 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43098 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43099 ASSERTCMP(xpage, ==, page);
43100 } else {
43101 - fscache_stat(&fscache_n_store_vmscan_gone);
43102 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43103 }
43104
43105 wake_up_bit(&cookie->flags, 0);
43106 @@ -107,7 +107,7 @@ page_busy:
43107 /* we might want to wait here, but that could deadlock the allocator as
43108 * the work threads writing to the cache may all end up sleeping
43109 * on memory allocation */
43110 - fscache_stat(&fscache_n_store_vmscan_busy);
43111 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43112 return false;
43113 }
43114 EXPORT_SYMBOL(__fscache_maybe_release_page);
43115 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43116 FSCACHE_COOKIE_STORING_TAG);
43117 if (!radix_tree_tag_get(&cookie->stores, page->index,
43118 FSCACHE_COOKIE_PENDING_TAG)) {
43119 - fscache_stat(&fscache_n_store_radix_deletes);
43120 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43121 xpage = radix_tree_delete(&cookie->stores, page->index);
43122 }
43123 spin_unlock(&cookie->stores_lock);
43124 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43125
43126 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43127
43128 - fscache_stat(&fscache_n_attr_changed_calls);
43129 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43130
43131 if (fscache_object_is_active(object)) {
43132 fscache_stat(&fscache_n_cop_attr_changed);
43133 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43134
43135 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43136
43137 - fscache_stat(&fscache_n_attr_changed);
43138 + fscache_stat_unchecked(&fscache_n_attr_changed);
43139
43140 op = kzalloc(sizeof(*op), GFP_KERNEL);
43141 if (!op) {
43142 - fscache_stat(&fscache_n_attr_changed_nomem);
43143 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43144 _leave(" = -ENOMEM");
43145 return -ENOMEM;
43146 }
43147 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43148 if (fscache_submit_exclusive_op(object, op) < 0)
43149 goto nobufs;
43150 spin_unlock(&cookie->lock);
43151 - fscache_stat(&fscache_n_attr_changed_ok);
43152 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43153 fscache_put_operation(op);
43154 _leave(" = 0");
43155 return 0;
43156 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43157 nobufs:
43158 spin_unlock(&cookie->lock);
43159 kfree(op);
43160 - fscache_stat(&fscache_n_attr_changed_nobufs);
43161 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43162 _leave(" = %d", -ENOBUFS);
43163 return -ENOBUFS;
43164 }
43165 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43166 /* allocate a retrieval operation and attempt to submit it */
43167 op = kzalloc(sizeof(*op), GFP_NOIO);
43168 if (!op) {
43169 - fscache_stat(&fscache_n_retrievals_nomem);
43170 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43171 return NULL;
43172 }
43173
43174 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43175 return 0;
43176 }
43177
43178 - fscache_stat(&fscache_n_retrievals_wait);
43179 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43180
43181 jif = jiffies;
43182 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43183 fscache_wait_bit_interruptible,
43184 TASK_INTERRUPTIBLE) != 0) {
43185 - fscache_stat(&fscache_n_retrievals_intr);
43186 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43187 _leave(" = -ERESTARTSYS");
43188 return -ERESTARTSYS;
43189 }
43190 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43191 */
43192 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43193 struct fscache_retrieval *op,
43194 - atomic_t *stat_op_waits,
43195 - atomic_t *stat_object_dead)
43196 + atomic_unchecked_t *stat_op_waits,
43197 + atomic_unchecked_t *stat_object_dead)
43198 {
43199 int ret;
43200
43201 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43202 goto check_if_dead;
43203
43204 _debug(">>> WT");
43205 - fscache_stat(stat_op_waits);
43206 + fscache_stat_unchecked(stat_op_waits);
43207 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43208 fscache_wait_bit_interruptible,
43209 TASK_INTERRUPTIBLE) < 0) {
43210 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43211
43212 check_if_dead:
43213 if (unlikely(fscache_object_is_dead(object))) {
43214 - fscache_stat(stat_object_dead);
43215 + fscache_stat_unchecked(stat_object_dead);
43216 return -ENOBUFS;
43217 }
43218 return 0;
43219 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43220
43221 _enter("%p,%p,,,", cookie, page);
43222
43223 - fscache_stat(&fscache_n_retrievals);
43224 + fscache_stat_unchecked(&fscache_n_retrievals);
43225
43226 if (hlist_empty(&cookie->backing_objects))
43227 goto nobufs;
43228 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43229 goto nobufs_unlock;
43230 spin_unlock(&cookie->lock);
43231
43232 - fscache_stat(&fscache_n_retrieval_ops);
43233 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43234
43235 /* pin the netfs read context in case we need to do the actual netfs
43236 * read because we've encountered a cache read failure */
43237 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43238
43239 error:
43240 if (ret == -ENOMEM)
43241 - fscache_stat(&fscache_n_retrievals_nomem);
43242 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43243 else if (ret == -ERESTARTSYS)
43244 - fscache_stat(&fscache_n_retrievals_intr);
43245 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43246 else if (ret == -ENODATA)
43247 - fscache_stat(&fscache_n_retrievals_nodata);
43248 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43249 else if (ret < 0)
43250 - fscache_stat(&fscache_n_retrievals_nobufs);
43251 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43252 else
43253 - fscache_stat(&fscache_n_retrievals_ok);
43254 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43255
43256 fscache_put_retrieval(op);
43257 _leave(" = %d", ret);
43258 @@ -429,7 +429,7 @@ nobufs_unlock:
43259 spin_unlock(&cookie->lock);
43260 kfree(op);
43261 nobufs:
43262 - fscache_stat(&fscache_n_retrievals_nobufs);
43263 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43264 _leave(" = -ENOBUFS");
43265 return -ENOBUFS;
43266 }
43267 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43268
43269 _enter("%p,,%d,,,", cookie, *nr_pages);
43270
43271 - fscache_stat(&fscache_n_retrievals);
43272 + fscache_stat_unchecked(&fscache_n_retrievals);
43273
43274 if (hlist_empty(&cookie->backing_objects))
43275 goto nobufs;
43276 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43277 goto nobufs_unlock;
43278 spin_unlock(&cookie->lock);
43279
43280 - fscache_stat(&fscache_n_retrieval_ops);
43281 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43282
43283 /* pin the netfs read context in case we need to do the actual netfs
43284 * read because we've encountered a cache read failure */
43285 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43286
43287 error:
43288 if (ret == -ENOMEM)
43289 - fscache_stat(&fscache_n_retrievals_nomem);
43290 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43291 else if (ret == -ERESTARTSYS)
43292 - fscache_stat(&fscache_n_retrievals_intr);
43293 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43294 else if (ret == -ENODATA)
43295 - fscache_stat(&fscache_n_retrievals_nodata);
43296 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43297 else if (ret < 0)
43298 - fscache_stat(&fscache_n_retrievals_nobufs);
43299 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43300 else
43301 - fscache_stat(&fscache_n_retrievals_ok);
43302 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43303
43304 fscache_put_retrieval(op);
43305 _leave(" = %d", ret);
43306 @@ -545,7 +545,7 @@ nobufs_unlock:
43307 spin_unlock(&cookie->lock);
43308 kfree(op);
43309 nobufs:
43310 - fscache_stat(&fscache_n_retrievals_nobufs);
43311 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43312 _leave(" = -ENOBUFS");
43313 return -ENOBUFS;
43314 }
43315 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43316
43317 _enter("%p,%p,,,", cookie, page);
43318
43319 - fscache_stat(&fscache_n_allocs);
43320 + fscache_stat_unchecked(&fscache_n_allocs);
43321
43322 if (hlist_empty(&cookie->backing_objects))
43323 goto nobufs;
43324 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43325 goto nobufs_unlock;
43326 spin_unlock(&cookie->lock);
43327
43328 - fscache_stat(&fscache_n_alloc_ops);
43329 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43330
43331 ret = fscache_wait_for_retrieval_activation(
43332 object, op,
43333 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43334
43335 error:
43336 if (ret == -ERESTARTSYS)
43337 - fscache_stat(&fscache_n_allocs_intr);
43338 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43339 else if (ret < 0)
43340 - fscache_stat(&fscache_n_allocs_nobufs);
43341 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43342 else
43343 - fscache_stat(&fscache_n_allocs_ok);
43344 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43345
43346 fscache_put_retrieval(op);
43347 _leave(" = %d", ret);
43348 @@ -625,7 +625,7 @@ nobufs_unlock:
43349 spin_unlock(&cookie->lock);
43350 kfree(op);
43351 nobufs:
43352 - fscache_stat(&fscache_n_allocs_nobufs);
43353 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43354 _leave(" = -ENOBUFS");
43355 return -ENOBUFS;
43356 }
43357 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43358
43359 spin_lock(&cookie->stores_lock);
43360
43361 - fscache_stat(&fscache_n_store_calls);
43362 + fscache_stat_unchecked(&fscache_n_store_calls);
43363
43364 /* find a page to store */
43365 page = NULL;
43366 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43367 page = results[0];
43368 _debug("gang %d [%lx]", n, page->index);
43369 if (page->index > op->store_limit) {
43370 - fscache_stat(&fscache_n_store_pages_over_limit);
43371 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43372 goto superseded;
43373 }
43374
43375 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43376 spin_unlock(&cookie->stores_lock);
43377 spin_unlock(&object->lock);
43378
43379 - fscache_stat(&fscache_n_store_pages);
43380 + fscache_stat_unchecked(&fscache_n_store_pages);
43381 fscache_stat(&fscache_n_cop_write_page);
43382 ret = object->cache->ops->write_page(op, page);
43383 fscache_stat_d(&fscache_n_cop_write_page);
43384 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43385 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43386 ASSERT(PageFsCache(page));
43387
43388 - fscache_stat(&fscache_n_stores);
43389 + fscache_stat_unchecked(&fscache_n_stores);
43390
43391 op = kzalloc(sizeof(*op), GFP_NOIO);
43392 if (!op)
43393 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43394 spin_unlock(&cookie->stores_lock);
43395 spin_unlock(&object->lock);
43396
43397 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43398 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43399 op->store_limit = object->store_limit;
43400
43401 if (fscache_submit_op(object, &op->op) < 0)
43402 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43403
43404 spin_unlock(&cookie->lock);
43405 radix_tree_preload_end();
43406 - fscache_stat(&fscache_n_store_ops);
43407 - fscache_stat(&fscache_n_stores_ok);
43408 + fscache_stat_unchecked(&fscache_n_store_ops);
43409 + fscache_stat_unchecked(&fscache_n_stores_ok);
43410
43411 /* the work queue now carries its own ref on the object */
43412 fscache_put_operation(&op->op);
43413 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43414 return 0;
43415
43416 already_queued:
43417 - fscache_stat(&fscache_n_stores_again);
43418 + fscache_stat_unchecked(&fscache_n_stores_again);
43419 already_pending:
43420 spin_unlock(&cookie->stores_lock);
43421 spin_unlock(&object->lock);
43422 spin_unlock(&cookie->lock);
43423 radix_tree_preload_end();
43424 kfree(op);
43425 - fscache_stat(&fscache_n_stores_ok);
43426 + fscache_stat_unchecked(&fscache_n_stores_ok);
43427 _leave(" = 0");
43428 return 0;
43429
43430 @@ -851,14 +851,14 @@ nobufs:
43431 spin_unlock(&cookie->lock);
43432 radix_tree_preload_end();
43433 kfree(op);
43434 - fscache_stat(&fscache_n_stores_nobufs);
43435 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43436 _leave(" = -ENOBUFS");
43437 return -ENOBUFS;
43438
43439 nomem_free:
43440 kfree(op);
43441 nomem:
43442 - fscache_stat(&fscache_n_stores_oom);
43443 + fscache_stat_unchecked(&fscache_n_stores_oom);
43444 _leave(" = -ENOMEM");
43445 return -ENOMEM;
43446 }
43447 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43448 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43449 ASSERTCMP(page, !=, NULL);
43450
43451 - fscache_stat(&fscache_n_uncaches);
43452 + fscache_stat_unchecked(&fscache_n_uncaches);
43453
43454 /* cache withdrawal may beat us to it */
43455 if (!PageFsCache(page))
43456 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43457 unsigned long loop;
43458
43459 #ifdef CONFIG_FSCACHE_STATS
43460 - atomic_add(pagevec->nr, &fscache_n_marks);
43461 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43462 #endif
43463
43464 for (loop = 0; loop < pagevec->nr; loop++) {
43465 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43466 index 4765190..2a067f2 100644
43467 --- a/fs/fscache/stats.c
43468 +++ b/fs/fscache/stats.c
43469 @@ -18,95 +18,95 @@
43470 /*
43471 * operation counters
43472 */
43473 -atomic_t fscache_n_op_pend;
43474 -atomic_t fscache_n_op_run;
43475 -atomic_t fscache_n_op_enqueue;
43476 -atomic_t fscache_n_op_requeue;
43477 -atomic_t fscache_n_op_deferred_release;
43478 -atomic_t fscache_n_op_release;
43479 -atomic_t fscache_n_op_gc;
43480 -atomic_t fscache_n_op_cancelled;
43481 -atomic_t fscache_n_op_rejected;
43482 +atomic_unchecked_t fscache_n_op_pend;
43483 +atomic_unchecked_t fscache_n_op_run;
43484 +atomic_unchecked_t fscache_n_op_enqueue;
43485 +atomic_unchecked_t fscache_n_op_requeue;
43486 +atomic_unchecked_t fscache_n_op_deferred_release;
43487 +atomic_unchecked_t fscache_n_op_release;
43488 +atomic_unchecked_t fscache_n_op_gc;
43489 +atomic_unchecked_t fscache_n_op_cancelled;
43490 +atomic_unchecked_t fscache_n_op_rejected;
43491
43492 -atomic_t fscache_n_attr_changed;
43493 -atomic_t fscache_n_attr_changed_ok;
43494 -atomic_t fscache_n_attr_changed_nobufs;
43495 -atomic_t fscache_n_attr_changed_nomem;
43496 -atomic_t fscache_n_attr_changed_calls;
43497 +atomic_unchecked_t fscache_n_attr_changed;
43498 +atomic_unchecked_t fscache_n_attr_changed_ok;
43499 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43500 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43501 +atomic_unchecked_t fscache_n_attr_changed_calls;
43502
43503 -atomic_t fscache_n_allocs;
43504 -atomic_t fscache_n_allocs_ok;
43505 -atomic_t fscache_n_allocs_wait;
43506 -atomic_t fscache_n_allocs_nobufs;
43507 -atomic_t fscache_n_allocs_intr;
43508 -atomic_t fscache_n_allocs_object_dead;
43509 -atomic_t fscache_n_alloc_ops;
43510 -atomic_t fscache_n_alloc_op_waits;
43511 +atomic_unchecked_t fscache_n_allocs;
43512 +atomic_unchecked_t fscache_n_allocs_ok;
43513 +atomic_unchecked_t fscache_n_allocs_wait;
43514 +atomic_unchecked_t fscache_n_allocs_nobufs;
43515 +atomic_unchecked_t fscache_n_allocs_intr;
43516 +atomic_unchecked_t fscache_n_allocs_object_dead;
43517 +atomic_unchecked_t fscache_n_alloc_ops;
43518 +atomic_unchecked_t fscache_n_alloc_op_waits;
43519
43520 -atomic_t fscache_n_retrievals;
43521 -atomic_t fscache_n_retrievals_ok;
43522 -atomic_t fscache_n_retrievals_wait;
43523 -atomic_t fscache_n_retrievals_nodata;
43524 -atomic_t fscache_n_retrievals_nobufs;
43525 -atomic_t fscache_n_retrievals_intr;
43526 -atomic_t fscache_n_retrievals_nomem;
43527 -atomic_t fscache_n_retrievals_object_dead;
43528 -atomic_t fscache_n_retrieval_ops;
43529 -atomic_t fscache_n_retrieval_op_waits;
43530 +atomic_unchecked_t fscache_n_retrievals;
43531 +atomic_unchecked_t fscache_n_retrievals_ok;
43532 +atomic_unchecked_t fscache_n_retrievals_wait;
43533 +atomic_unchecked_t fscache_n_retrievals_nodata;
43534 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43535 +atomic_unchecked_t fscache_n_retrievals_intr;
43536 +atomic_unchecked_t fscache_n_retrievals_nomem;
43537 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43538 +atomic_unchecked_t fscache_n_retrieval_ops;
43539 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43540
43541 -atomic_t fscache_n_stores;
43542 -atomic_t fscache_n_stores_ok;
43543 -atomic_t fscache_n_stores_again;
43544 -atomic_t fscache_n_stores_nobufs;
43545 -atomic_t fscache_n_stores_oom;
43546 -atomic_t fscache_n_store_ops;
43547 -atomic_t fscache_n_store_calls;
43548 -atomic_t fscache_n_store_pages;
43549 -atomic_t fscache_n_store_radix_deletes;
43550 -atomic_t fscache_n_store_pages_over_limit;
43551 +atomic_unchecked_t fscache_n_stores;
43552 +atomic_unchecked_t fscache_n_stores_ok;
43553 +atomic_unchecked_t fscache_n_stores_again;
43554 +atomic_unchecked_t fscache_n_stores_nobufs;
43555 +atomic_unchecked_t fscache_n_stores_oom;
43556 +atomic_unchecked_t fscache_n_store_ops;
43557 +atomic_unchecked_t fscache_n_store_calls;
43558 +atomic_unchecked_t fscache_n_store_pages;
43559 +atomic_unchecked_t fscache_n_store_radix_deletes;
43560 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43561
43562 -atomic_t fscache_n_store_vmscan_not_storing;
43563 -atomic_t fscache_n_store_vmscan_gone;
43564 -atomic_t fscache_n_store_vmscan_busy;
43565 -atomic_t fscache_n_store_vmscan_cancelled;
43566 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43567 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43568 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43569 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43570
43571 -atomic_t fscache_n_marks;
43572 -atomic_t fscache_n_uncaches;
43573 +atomic_unchecked_t fscache_n_marks;
43574 +atomic_unchecked_t fscache_n_uncaches;
43575
43576 -atomic_t fscache_n_acquires;
43577 -atomic_t fscache_n_acquires_null;
43578 -atomic_t fscache_n_acquires_no_cache;
43579 -atomic_t fscache_n_acquires_ok;
43580 -atomic_t fscache_n_acquires_nobufs;
43581 -atomic_t fscache_n_acquires_oom;
43582 +atomic_unchecked_t fscache_n_acquires;
43583 +atomic_unchecked_t fscache_n_acquires_null;
43584 +atomic_unchecked_t fscache_n_acquires_no_cache;
43585 +atomic_unchecked_t fscache_n_acquires_ok;
43586 +atomic_unchecked_t fscache_n_acquires_nobufs;
43587 +atomic_unchecked_t fscache_n_acquires_oom;
43588
43589 -atomic_t fscache_n_updates;
43590 -atomic_t fscache_n_updates_null;
43591 -atomic_t fscache_n_updates_run;
43592 +atomic_unchecked_t fscache_n_updates;
43593 +atomic_unchecked_t fscache_n_updates_null;
43594 +atomic_unchecked_t fscache_n_updates_run;
43595
43596 -atomic_t fscache_n_relinquishes;
43597 -atomic_t fscache_n_relinquishes_null;
43598 -atomic_t fscache_n_relinquishes_waitcrt;
43599 -atomic_t fscache_n_relinquishes_retire;
43600 +atomic_unchecked_t fscache_n_relinquishes;
43601 +atomic_unchecked_t fscache_n_relinquishes_null;
43602 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43603 +atomic_unchecked_t fscache_n_relinquishes_retire;
43604
43605 -atomic_t fscache_n_cookie_index;
43606 -atomic_t fscache_n_cookie_data;
43607 -atomic_t fscache_n_cookie_special;
43608 +atomic_unchecked_t fscache_n_cookie_index;
43609 +atomic_unchecked_t fscache_n_cookie_data;
43610 +atomic_unchecked_t fscache_n_cookie_special;
43611
43612 -atomic_t fscache_n_object_alloc;
43613 -atomic_t fscache_n_object_no_alloc;
43614 -atomic_t fscache_n_object_lookups;
43615 -atomic_t fscache_n_object_lookups_negative;
43616 -atomic_t fscache_n_object_lookups_positive;
43617 -atomic_t fscache_n_object_lookups_timed_out;
43618 -atomic_t fscache_n_object_created;
43619 -atomic_t fscache_n_object_avail;
43620 -atomic_t fscache_n_object_dead;
43621 +atomic_unchecked_t fscache_n_object_alloc;
43622 +atomic_unchecked_t fscache_n_object_no_alloc;
43623 +atomic_unchecked_t fscache_n_object_lookups;
43624 +atomic_unchecked_t fscache_n_object_lookups_negative;
43625 +atomic_unchecked_t fscache_n_object_lookups_positive;
43626 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43627 +atomic_unchecked_t fscache_n_object_created;
43628 +atomic_unchecked_t fscache_n_object_avail;
43629 +atomic_unchecked_t fscache_n_object_dead;
43630
43631 -atomic_t fscache_n_checkaux_none;
43632 -atomic_t fscache_n_checkaux_okay;
43633 -atomic_t fscache_n_checkaux_update;
43634 -atomic_t fscache_n_checkaux_obsolete;
43635 +atomic_unchecked_t fscache_n_checkaux_none;
43636 +atomic_unchecked_t fscache_n_checkaux_okay;
43637 +atomic_unchecked_t fscache_n_checkaux_update;
43638 +atomic_unchecked_t fscache_n_checkaux_obsolete;
43639
43640 atomic_t fscache_n_cop_alloc_object;
43641 atomic_t fscache_n_cop_lookup_object;
43642 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43643 seq_puts(m, "FS-Cache statistics\n");
43644
43645 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43646 - atomic_read(&fscache_n_cookie_index),
43647 - atomic_read(&fscache_n_cookie_data),
43648 - atomic_read(&fscache_n_cookie_special));
43649 + atomic_read_unchecked(&fscache_n_cookie_index),
43650 + atomic_read_unchecked(&fscache_n_cookie_data),
43651 + atomic_read_unchecked(&fscache_n_cookie_special));
43652
43653 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43654 - atomic_read(&fscache_n_object_alloc),
43655 - atomic_read(&fscache_n_object_no_alloc),
43656 - atomic_read(&fscache_n_object_avail),
43657 - atomic_read(&fscache_n_object_dead));
43658 + atomic_read_unchecked(&fscache_n_object_alloc),
43659 + atomic_read_unchecked(&fscache_n_object_no_alloc),
43660 + atomic_read_unchecked(&fscache_n_object_avail),
43661 + atomic_read_unchecked(&fscache_n_object_dead));
43662 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43663 - atomic_read(&fscache_n_checkaux_none),
43664 - atomic_read(&fscache_n_checkaux_okay),
43665 - atomic_read(&fscache_n_checkaux_update),
43666 - atomic_read(&fscache_n_checkaux_obsolete));
43667 + atomic_read_unchecked(&fscache_n_checkaux_none),
43668 + atomic_read_unchecked(&fscache_n_checkaux_okay),
43669 + atomic_read_unchecked(&fscache_n_checkaux_update),
43670 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43671
43672 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43673 - atomic_read(&fscache_n_marks),
43674 - atomic_read(&fscache_n_uncaches));
43675 + atomic_read_unchecked(&fscache_n_marks),
43676 + atomic_read_unchecked(&fscache_n_uncaches));
43677
43678 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43679 " oom=%u\n",
43680 - atomic_read(&fscache_n_acquires),
43681 - atomic_read(&fscache_n_acquires_null),
43682 - atomic_read(&fscache_n_acquires_no_cache),
43683 - atomic_read(&fscache_n_acquires_ok),
43684 - atomic_read(&fscache_n_acquires_nobufs),
43685 - atomic_read(&fscache_n_acquires_oom));
43686 + atomic_read_unchecked(&fscache_n_acquires),
43687 + atomic_read_unchecked(&fscache_n_acquires_null),
43688 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
43689 + atomic_read_unchecked(&fscache_n_acquires_ok),
43690 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
43691 + atomic_read_unchecked(&fscache_n_acquires_oom));
43692
43693 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43694 - atomic_read(&fscache_n_object_lookups),
43695 - atomic_read(&fscache_n_object_lookups_negative),
43696 - atomic_read(&fscache_n_object_lookups_positive),
43697 - atomic_read(&fscache_n_object_created),
43698 - atomic_read(&fscache_n_object_lookups_timed_out));
43699 + atomic_read_unchecked(&fscache_n_object_lookups),
43700 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
43701 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
43702 + atomic_read_unchecked(&fscache_n_object_created),
43703 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43704
43705 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43706 - atomic_read(&fscache_n_updates),
43707 - atomic_read(&fscache_n_updates_null),
43708 - atomic_read(&fscache_n_updates_run));
43709 + atomic_read_unchecked(&fscache_n_updates),
43710 + atomic_read_unchecked(&fscache_n_updates_null),
43711 + atomic_read_unchecked(&fscache_n_updates_run));
43712
43713 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43714 - atomic_read(&fscache_n_relinquishes),
43715 - atomic_read(&fscache_n_relinquishes_null),
43716 - atomic_read(&fscache_n_relinquishes_waitcrt),
43717 - atomic_read(&fscache_n_relinquishes_retire));
43718 + atomic_read_unchecked(&fscache_n_relinquishes),
43719 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43720 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43721 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43722
43723 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43724 - atomic_read(&fscache_n_attr_changed),
43725 - atomic_read(&fscache_n_attr_changed_ok),
43726 - atomic_read(&fscache_n_attr_changed_nobufs),
43727 - atomic_read(&fscache_n_attr_changed_nomem),
43728 - atomic_read(&fscache_n_attr_changed_calls));
43729 + atomic_read_unchecked(&fscache_n_attr_changed),
43730 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43731 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43732 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43733 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43734
43735 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43736 - atomic_read(&fscache_n_allocs),
43737 - atomic_read(&fscache_n_allocs_ok),
43738 - atomic_read(&fscache_n_allocs_wait),
43739 - atomic_read(&fscache_n_allocs_nobufs),
43740 - atomic_read(&fscache_n_allocs_intr));
43741 + atomic_read_unchecked(&fscache_n_allocs),
43742 + atomic_read_unchecked(&fscache_n_allocs_ok),
43743 + atomic_read_unchecked(&fscache_n_allocs_wait),
43744 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43745 + atomic_read_unchecked(&fscache_n_allocs_intr));
43746 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43747 - atomic_read(&fscache_n_alloc_ops),
43748 - atomic_read(&fscache_n_alloc_op_waits),
43749 - atomic_read(&fscache_n_allocs_object_dead));
43750 + atomic_read_unchecked(&fscache_n_alloc_ops),
43751 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43752 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43753
43754 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43755 " int=%u oom=%u\n",
43756 - atomic_read(&fscache_n_retrievals),
43757 - atomic_read(&fscache_n_retrievals_ok),
43758 - atomic_read(&fscache_n_retrievals_wait),
43759 - atomic_read(&fscache_n_retrievals_nodata),
43760 - atomic_read(&fscache_n_retrievals_nobufs),
43761 - atomic_read(&fscache_n_retrievals_intr),
43762 - atomic_read(&fscache_n_retrievals_nomem));
43763 + atomic_read_unchecked(&fscache_n_retrievals),
43764 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43765 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43766 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43767 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43768 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43769 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43770 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43771 - atomic_read(&fscache_n_retrieval_ops),
43772 - atomic_read(&fscache_n_retrieval_op_waits),
43773 - atomic_read(&fscache_n_retrievals_object_dead));
43774 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43775 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43776 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43777
43778 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43779 - atomic_read(&fscache_n_stores),
43780 - atomic_read(&fscache_n_stores_ok),
43781 - atomic_read(&fscache_n_stores_again),
43782 - atomic_read(&fscache_n_stores_nobufs),
43783 - atomic_read(&fscache_n_stores_oom));
43784 + atomic_read_unchecked(&fscache_n_stores),
43785 + atomic_read_unchecked(&fscache_n_stores_ok),
43786 + atomic_read_unchecked(&fscache_n_stores_again),
43787 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43788 + atomic_read_unchecked(&fscache_n_stores_oom));
43789 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43790 - atomic_read(&fscache_n_store_ops),
43791 - atomic_read(&fscache_n_store_calls),
43792 - atomic_read(&fscache_n_store_pages),
43793 - atomic_read(&fscache_n_store_radix_deletes),
43794 - atomic_read(&fscache_n_store_pages_over_limit));
43795 + atomic_read_unchecked(&fscache_n_store_ops),
43796 + atomic_read_unchecked(&fscache_n_store_calls),
43797 + atomic_read_unchecked(&fscache_n_store_pages),
43798 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43799 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43800
43801 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43802 - atomic_read(&fscache_n_store_vmscan_not_storing),
43803 - atomic_read(&fscache_n_store_vmscan_gone),
43804 - atomic_read(&fscache_n_store_vmscan_busy),
43805 - atomic_read(&fscache_n_store_vmscan_cancelled));
43806 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43807 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43808 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43809 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43810
43811 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43812 - atomic_read(&fscache_n_op_pend),
43813 - atomic_read(&fscache_n_op_run),
43814 - atomic_read(&fscache_n_op_enqueue),
43815 - atomic_read(&fscache_n_op_cancelled),
43816 - atomic_read(&fscache_n_op_rejected));
43817 + atomic_read_unchecked(&fscache_n_op_pend),
43818 + atomic_read_unchecked(&fscache_n_op_run),
43819 + atomic_read_unchecked(&fscache_n_op_enqueue),
43820 + atomic_read_unchecked(&fscache_n_op_cancelled),
43821 + atomic_read_unchecked(&fscache_n_op_rejected));
43822 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43823 - atomic_read(&fscache_n_op_deferred_release),
43824 - atomic_read(&fscache_n_op_release),
43825 - atomic_read(&fscache_n_op_gc));
43826 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43827 + atomic_read_unchecked(&fscache_n_op_release),
43828 + atomic_read_unchecked(&fscache_n_op_gc));
43829
43830 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43831 atomic_read(&fscache_n_cop_alloc_object),
43832 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
43833 index 3426521..3b75162 100644
43834 --- a/fs/fuse/cuse.c
43835 +++ b/fs/fuse/cuse.c
43836 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
43837 INIT_LIST_HEAD(&cuse_conntbl[i]);
43838
43839 /* inherit and extend fuse_dev_operations */
43840 - cuse_channel_fops = fuse_dev_operations;
43841 - cuse_channel_fops.owner = THIS_MODULE;
43842 - cuse_channel_fops.open = cuse_channel_open;
43843 - cuse_channel_fops.release = cuse_channel_release;
43844 + pax_open_kernel();
43845 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43846 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43847 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43848 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43849 + pax_close_kernel();
43850
43851 cuse_class = class_create(THIS_MODULE, "cuse");
43852 if (IS_ERR(cuse_class))
43853 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
43854 index 2aaf3ea..8e50863 100644
43855 --- a/fs/fuse/dev.c
43856 +++ b/fs/fuse/dev.c
43857 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
43858 ret = 0;
43859 pipe_lock(pipe);
43860
43861 - if (!pipe->readers) {
43862 + if (!atomic_read(&pipe->readers)) {
43863 send_sig(SIGPIPE, current, 0);
43864 if (!ret)
43865 ret = -EPIPE;
43866 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
43867 index 9f63e49..d8a64c0 100644
43868 --- a/fs/fuse/dir.c
43869 +++ b/fs/fuse/dir.c
43870 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
43871 return link;
43872 }
43873
43874 -static void free_link(char *link)
43875 +static void free_link(const char *link)
43876 {
43877 if (!IS_ERR(link))
43878 free_page((unsigned long) link);
43879 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
43880 index cfd4959..a780959 100644
43881 --- a/fs/gfs2/inode.c
43882 +++ b/fs/gfs2/inode.c
43883 @@ -1490,7 +1490,7 @@ out:
43884
43885 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43886 {
43887 - char *s = nd_get_link(nd);
43888 + const char *s = nd_get_link(nd);
43889 if (!IS_ERR(s))
43890 kfree(s);
43891 }
43892 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
43893 index 0be5a78..9cfb853 100644
43894 --- a/fs/hugetlbfs/inode.c
43895 +++ b/fs/hugetlbfs/inode.c
43896 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
43897 .kill_sb = kill_litter_super,
43898 };
43899
43900 -static struct vfsmount *hugetlbfs_vfsmount;
43901 +struct vfsmount *hugetlbfs_vfsmount;
43902
43903 static int can_do_hugetlb_shm(void)
43904 {
43905 diff --git a/fs/inode.c b/fs/inode.c
43906 index ee4e66b..0451521 100644
43907 --- a/fs/inode.c
43908 +++ b/fs/inode.c
43909 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
43910
43911 #ifdef CONFIG_SMP
43912 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43913 - static atomic_t shared_last_ino;
43914 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43915 + static atomic_unchecked_t shared_last_ino;
43916 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43917
43918 res = next - LAST_INO_BATCH;
43919 }
43920 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
43921 index e513f19..2ab1351 100644
43922 --- a/fs/jffs2/erase.c
43923 +++ b/fs/jffs2/erase.c
43924 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
43925 struct jffs2_unknown_node marker = {
43926 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43927 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43928 - .totlen = cpu_to_je32(c->cleanmarker_size)
43929 + .totlen = cpu_to_je32(c->cleanmarker_size),
43930 + .hdr_crc = cpu_to_je32(0)
43931 };
43932
43933 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43934 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
43935 index b09e51d..e482afa 100644
43936 --- a/fs/jffs2/wbuf.c
43937 +++ b/fs/jffs2/wbuf.c
43938 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
43939 {
43940 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43941 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43942 - .totlen = constant_cpu_to_je32(8)
43943 + .totlen = constant_cpu_to_je32(8),
43944 + .hdr_crc = constant_cpu_to_je32(0)
43945 };
43946
43947 /*
43948 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
43949 index a44eff0..462e07d 100644
43950 --- a/fs/jfs/super.c
43951 +++ b/fs/jfs/super.c
43952 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
43953
43954 jfs_inode_cachep =
43955 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43956 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43957 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43958 init_once);
43959 if (jfs_inode_cachep == NULL)
43960 return -ENOMEM;
43961 diff --git a/fs/libfs.c b/fs/libfs.c
43962 index f6d411e..e82a08d 100644
43963 --- a/fs/libfs.c
43964 +++ b/fs/libfs.c
43965 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
43966
43967 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43968 struct dentry *next;
43969 + char d_name[sizeof(next->d_iname)];
43970 + const unsigned char *name;
43971 +
43972 next = list_entry(p, struct dentry, d_u.d_child);
43973 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43974 if (!simple_positive(next)) {
43975 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
43976
43977 spin_unlock(&next->d_lock);
43978 spin_unlock(&dentry->d_lock);
43979 - if (filldir(dirent, next->d_name.name,
43980 + name = next->d_name.name;
43981 + if (name == next->d_iname) {
43982 + memcpy(d_name, name, next->d_name.len);
43983 + name = d_name;
43984 + }
43985 + if (filldir(dirent, name,
43986 next->d_name.len, filp->f_pos,
43987 next->d_inode->i_ino,
43988 dt_type(next->d_inode)) < 0)
43989 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
43990 index 8392cb8..80d6193 100644
43991 --- a/fs/lockd/clntproc.c
43992 +++ b/fs/lockd/clntproc.c
43993 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
43994 /*
43995 * Cookie counter for NLM requests
43996 */
43997 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43998 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43999
44000 void nlmclnt_next_cookie(struct nlm_cookie *c)
44001 {
44002 - u32 cookie = atomic_inc_return(&nlm_cookie);
44003 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44004
44005 memcpy(c->data, &cookie, 4);
44006 c->len=4;
44007 diff --git a/fs/locks.c b/fs/locks.c
44008 index 637694b..f84a121 100644
44009 --- a/fs/locks.c
44010 +++ b/fs/locks.c
44011 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44012 return;
44013
44014 if (filp->f_op && filp->f_op->flock) {
44015 - struct file_lock fl = {
44016 + struct file_lock flock = {
44017 .fl_pid = current->tgid,
44018 .fl_file = filp,
44019 .fl_flags = FL_FLOCK,
44020 .fl_type = F_UNLCK,
44021 .fl_end = OFFSET_MAX,
44022 };
44023 - filp->f_op->flock(filp, F_SETLKW, &fl);
44024 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44025 - fl.fl_ops->fl_release_private(&fl);
44026 + filp->f_op->flock(filp, F_SETLKW, &flock);
44027 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44028 + flock.fl_ops->fl_release_private(&flock);
44029 }
44030
44031 lock_flocks();
44032 diff --git a/fs/namei.c b/fs/namei.c
44033 index 5008f01..90328a7 100644
44034 --- a/fs/namei.c
44035 +++ b/fs/namei.c
44036 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44037 if (ret != -EACCES)
44038 return ret;
44039
44040 +#ifdef CONFIG_GRKERNSEC
44041 + /* we'll block if we have to log due to a denied capability use */
44042 + if (mask & MAY_NOT_BLOCK)
44043 + return -ECHILD;
44044 +#endif
44045 +
44046 if (S_ISDIR(inode->i_mode)) {
44047 /* DACs are overridable for directories */
44048 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44049 - return 0;
44050 if (!(mask & MAY_WRITE))
44051 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44052 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44053 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44054 return 0;
44055 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44056 + return 0;
44057 return -EACCES;
44058 }
44059 /*
44060 + * Searching includes executable on directories, else just read.
44061 + */
44062 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44063 + if (mask == MAY_READ)
44064 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44065 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44066 + return 0;
44067 +
44068 + /*
44069 * Read/write DACs are always overridable.
44070 * Executable DACs are overridable when there is
44071 * at least one exec bit set.
44072 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44073 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44074 return 0;
44075
44076 - /*
44077 - * Searching includes executable on directories, else just read.
44078 - */
44079 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44080 - if (mask == MAY_READ)
44081 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44082 - return 0;
44083 -
44084 return -EACCES;
44085 }
44086
44087 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44088 return error;
44089 }
44090
44091 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44092 + dentry->d_inode, dentry, nd->path.mnt)) {
44093 + error = -EACCES;
44094 + *p = ERR_PTR(error); /* no ->put_link(), please */
44095 + path_put(&nd->path);
44096 + return error;
44097 + }
44098 +
44099 nd->last_type = LAST_BIND;
44100 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44101 error = PTR_ERR(*p);
44102 if (!IS_ERR(*p)) {
44103 - char *s = nd_get_link(nd);
44104 + const char *s = nd_get_link(nd);
44105 error = 0;
44106 if (s)
44107 error = __vfs_follow_link(nd, s);
44108 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44109 if (!err)
44110 err = complete_walk(nd);
44111
44112 + if (!(nd->flags & LOOKUP_PARENT)) {
44113 +#ifdef CONFIG_GRKERNSEC
44114 + if (flags & LOOKUP_RCU) {
44115 + if (!err)
44116 + path_put(&nd->path);
44117 + err = -ECHILD;
44118 + } else
44119 +#endif
44120 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44121 + if (!err)
44122 + path_put(&nd->path);
44123 + err = -ENOENT;
44124 + }
44125 + }
44126 +
44127 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44128 if (!nd->inode->i_op->lookup) {
44129 path_put(&nd->path);
44130 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44131 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44132
44133 if (likely(!retval)) {
44134 + if (*name != '/' && nd->path.dentry && nd->inode) {
44135 +#ifdef CONFIG_GRKERNSEC
44136 + if (flags & LOOKUP_RCU)
44137 + return -ECHILD;
44138 +#endif
44139 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44140 + return -ENOENT;
44141 + }
44142 +
44143 if (unlikely(!audit_dummy_context())) {
44144 if (nd->path.dentry && nd->inode)
44145 audit_inode(name, nd->path.dentry);
44146 @@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44147 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44148 return -EPERM;
44149
44150 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44151 + return -EPERM;
44152 + if (gr_handle_rawio(inode))
44153 + return -EPERM;
44154 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44155 + return -EACCES;
44156 +
44157 return 0;
44158 }
44159
44160 @@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44161 error = complete_walk(nd);
44162 if (error)
44163 return ERR_PTR(error);
44164 +#ifdef CONFIG_GRKERNSEC
44165 + if (nd->flags & LOOKUP_RCU) {
44166 + error = -ECHILD;
44167 + goto exit;
44168 + }
44169 +#endif
44170 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44171 + error = -ENOENT;
44172 + goto exit;
44173 + }
44174 audit_inode(pathname, nd->path.dentry);
44175 if (open_flag & O_CREAT) {
44176 error = -EISDIR;
44177 @@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44178 error = complete_walk(nd);
44179 if (error)
44180 return ERR_PTR(error);
44181 +#ifdef CONFIG_GRKERNSEC
44182 + if (nd->flags & LOOKUP_RCU) {
44183 + error = -ECHILD;
44184 + goto exit;
44185 + }
44186 +#endif
44187 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44188 + error = -ENOENT;
44189 + goto exit;
44190 + }
44191 audit_inode(pathname, dir);
44192 goto ok;
44193 }
44194 @@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44195 error = complete_walk(nd);
44196 if (error)
44197 return ERR_PTR(-ECHILD);
44198 +#ifdef CONFIG_GRKERNSEC
44199 + if (nd->flags & LOOKUP_RCU) {
44200 + error = -ECHILD;
44201 + goto exit;
44202 + }
44203 +#endif
44204 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44205 + error = -ENOENT;
44206 + goto exit;
44207 + }
44208
44209 error = -ENOTDIR;
44210 if (nd->flags & LOOKUP_DIRECTORY) {
44211 @@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44212 /* Negative dentry, just create the file */
44213 if (!dentry->d_inode) {
44214 int mode = op->mode;
44215 +
44216 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44217 + error = -EACCES;
44218 + goto exit_mutex_unlock;
44219 + }
44220 +
44221 if (!IS_POSIXACL(dir->d_inode))
44222 mode &= ~current_umask();
44223 /*
44224 @@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44225 error = vfs_create(dir->d_inode, dentry, mode, nd);
44226 if (error)
44227 goto exit_mutex_unlock;
44228 + else
44229 + gr_handle_create(path->dentry, path->mnt);
44230 mutex_unlock(&dir->d_inode->i_mutex);
44231 dput(nd->path.dentry);
44232 nd->path.dentry = dentry;
44233 @@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44234 /*
44235 * It already exists.
44236 */
44237 +
44238 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44239 + error = -ENOENT;
44240 + goto exit_mutex_unlock;
44241 + }
44242 +
44243 + /* only check if O_CREAT is specified, all other checks need to go
44244 + into may_open */
44245 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44246 + error = -EACCES;
44247 + goto exit_mutex_unlock;
44248 + }
44249 +
44250 mutex_unlock(&dir->d_inode->i_mutex);
44251 audit_inode(pathname, path->dentry);
44252
44253 @@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44254 *path = nd.path;
44255 return dentry;
44256 eexist:
44257 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44258 + dput(dentry);
44259 + dentry = ERR_PTR(-ENOENT);
44260 + goto fail;
44261 + }
44262 dput(dentry);
44263 dentry = ERR_PTR(-EEXIST);
44264 fail:
44265 @@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44266 }
44267 EXPORT_SYMBOL(user_path_create);
44268
44269 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44270 +{
44271 + char *tmp = getname(pathname);
44272 + struct dentry *res;
44273 + if (IS_ERR(tmp))
44274 + return ERR_CAST(tmp);
44275 + res = kern_path_create(dfd, tmp, path, is_dir);
44276 + if (IS_ERR(res))
44277 + putname(tmp);
44278 + else
44279 + *to = tmp;
44280 + return res;
44281 +}
44282 +
44283 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44284 {
44285 int error = may_create(dir, dentry);
44286 @@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44287 error = mnt_want_write(path.mnt);
44288 if (error)
44289 goto out_dput;
44290 +
44291 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44292 + error = -EPERM;
44293 + goto out_drop_write;
44294 + }
44295 +
44296 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44297 + error = -EACCES;
44298 + goto out_drop_write;
44299 + }
44300 +
44301 error = security_path_mknod(&path, dentry, mode, dev);
44302 if (error)
44303 goto out_drop_write;
44304 @@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44305 }
44306 out_drop_write:
44307 mnt_drop_write(path.mnt);
44308 +
44309 + if (!error)
44310 + gr_handle_create(dentry, path.mnt);
44311 out_dput:
44312 dput(dentry);
44313 mutex_unlock(&path.dentry->d_inode->i_mutex);
44314 @@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44315 error = mnt_want_write(path.mnt);
44316 if (error)
44317 goto out_dput;
44318 +
44319 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44320 + error = -EACCES;
44321 + goto out_drop_write;
44322 + }
44323 +
44324 error = security_path_mkdir(&path, dentry, mode);
44325 if (error)
44326 goto out_drop_write;
44327 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44328 out_drop_write:
44329 mnt_drop_write(path.mnt);
44330 +
44331 + if (!error)
44332 + gr_handle_create(dentry, path.mnt);
44333 out_dput:
44334 dput(dentry);
44335 mutex_unlock(&path.dentry->d_inode->i_mutex);
44336 @@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44337 char * name;
44338 struct dentry *dentry;
44339 struct nameidata nd;
44340 + ino_t saved_ino = 0;
44341 + dev_t saved_dev = 0;
44342
44343 error = user_path_parent(dfd, pathname, &nd, &name);
44344 if (error)
44345 @@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44346 error = -ENOENT;
44347 goto exit3;
44348 }
44349 +
44350 + saved_ino = dentry->d_inode->i_ino;
44351 + saved_dev = gr_get_dev_from_dentry(dentry);
44352 +
44353 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44354 + error = -EACCES;
44355 + goto exit3;
44356 + }
44357 +
44358 error = mnt_want_write(nd.path.mnt);
44359 if (error)
44360 goto exit3;
44361 @@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44362 if (error)
44363 goto exit4;
44364 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44365 + if (!error && (saved_dev || saved_ino))
44366 + gr_handle_delete(saved_ino, saved_dev);
44367 exit4:
44368 mnt_drop_write(nd.path.mnt);
44369 exit3:
44370 @@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44371 struct dentry *dentry;
44372 struct nameidata nd;
44373 struct inode *inode = NULL;
44374 + ino_t saved_ino = 0;
44375 + dev_t saved_dev = 0;
44376
44377 error = user_path_parent(dfd, pathname, &nd, &name);
44378 if (error)
44379 @@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44380 if (!inode)
44381 goto slashes;
44382 ihold(inode);
44383 +
44384 + if (inode->i_nlink <= 1) {
44385 + saved_ino = inode->i_ino;
44386 + saved_dev = gr_get_dev_from_dentry(dentry);
44387 + }
44388 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44389 + error = -EACCES;
44390 + goto exit2;
44391 + }
44392 +
44393 error = mnt_want_write(nd.path.mnt);
44394 if (error)
44395 goto exit2;
44396 @@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44397 if (error)
44398 goto exit3;
44399 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44400 + if (!error && (saved_ino || saved_dev))
44401 + gr_handle_delete(saved_ino, saved_dev);
44402 exit3:
44403 mnt_drop_write(nd.path.mnt);
44404 exit2:
44405 @@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44406 error = mnt_want_write(path.mnt);
44407 if (error)
44408 goto out_dput;
44409 +
44410 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44411 + error = -EACCES;
44412 + goto out_drop_write;
44413 + }
44414 +
44415 error = security_path_symlink(&path, dentry, from);
44416 if (error)
44417 goto out_drop_write;
44418 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44419 + if (!error)
44420 + gr_handle_create(dentry, path.mnt);
44421 out_drop_write:
44422 mnt_drop_write(path.mnt);
44423 out_dput:
44424 @@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44425 {
44426 struct dentry *new_dentry;
44427 struct path old_path, new_path;
44428 + char *to = NULL;
44429 int how = 0;
44430 int error;
44431
44432 @@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44433 if (error)
44434 return error;
44435
44436 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44437 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44438 error = PTR_ERR(new_dentry);
44439 if (IS_ERR(new_dentry))
44440 goto out;
44441 @@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44442 error = mnt_want_write(new_path.mnt);
44443 if (error)
44444 goto out_dput;
44445 +
44446 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44447 + old_path.dentry->d_inode,
44448 + old_path.dentry->d_inode->i_mode, to)) {
44449 + error = -EACCES;
44450 + goto out_drop_write;
44451 + }
44452 +
44453 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44454 + old_path.dentry, old_path.mnt, to)) {
44455 + error = -EACCES;
44456 + goto out_drop_write;
44457 + }
44458 +
44459 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44460 if (error)
44461 goto out_drop_write;
44462 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44463 + if (!error)
44464 + gr_handle_create(new_dentry, new_path.mnt);
44465 out_drop_write:
44466 mnt_drop_write(new_path.mnt);
44467 out_dput:
44468 + putname(to);
44469 dput(new_dentry);
44470 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44471 path_put(&new_path);
44472 @@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44473 if (new_dentry == trap)
44474 goto exit5;
44475
44476 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44477 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44478 + to);
44479 + if (error)
44480 + goto exit5;
44481 +
44482 error = mnt_want_write(oldnd.path.mnt);
44483 if (error)
44484 goto exit5;
44485 @@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44486 goto exit6;
44487 error = vfs_rename(old_dir->d_inode, old_dentry,
44488 new_dir->d_inode, new_dentry);
44489 + if (!error)
44490 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44491 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44492 exit6:
44493 mnt_drop_write(oldnd.path.mnt);
44494 exit5:
44495 @@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44496
44497 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44498 {
44499 + char tmpbuf[64];
44500 + const char *newlink;
44501 int len;
44502
44503 len = PTR_ERR(link);
44504 @@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44505 len = strlen(link);
44506 if (len > (unsigned) buflen)
44507 len = buflen;
44508 - if (copy_to_user(buffer, link, len))
44509 +
44510 + if (len < sizeof(tmpbuf)) {
44511 + memcpy(tmpbuf, link, len);
44512 + newlink = tmpbuf;
44513 + } else
44514 + newlink = link;
44515 +
44516 + if (copy_to_user(buffer, newlink, len))
44517 len = -EFAULT;
44518 out:
44519 return len;
44520 diff --git a/fs/namespace.c b/fs/namespace.c
44521 index cfc6d44..b4632a5 100644
44522 --- a/fs/namespace.c
44523 +++ b/fs/namespace.c
44524 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44525 if (!(sb->s_flags & MS_RDONLY))
44526 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44527 up_write(&sb->s_umount);
44528 +
44529 + gr_log_remount(mnt->mnt_devname, retval);
44530 +
44531 return retval;
44532 }
44533
44534 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44535 br_write_unlock(vfsmount_lock);
44536 up_write(&namespace_sem);
44537 release_mounts(&umount_list);
44538 +
44539 + gr_log_unmount(mnt->mnt_devname, retval);
44540 +
44541 return retval;
44542 }
44543
44544 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44545 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44546 MS_STRICTATIME);
44547
44548 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44549 + retval = -EPERM;
44550 + goto dput_out;
44551 + }
44552 +
44553 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44554 + retval = -EPERM;
44555 + goto dput_out;
44556 + }
44557 +
44558 if (flags & MS_REMOUNT)
44559 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44560 data_page);
44561 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44562 dev_name, data_page);
44563 dput_out:
44564 path_put(&path);
44565 +
44566 + gr_log_mount(dev_name, dir_name, retval);
44567 +
44568 return retval;
44569 }
44570
44571 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44572 if (error)
44573 goto out2;
44574
44575 + if (gr_handle_chroot_pivot()) {
44576 + error = -EPERM;
44577 + goto out2;
44578 + }
44579 +
44580 get_fs_root(current->fs, &root);
44581 error = lock_mount(&old);
44582 if (error)
44583 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44584 index 3db6b82..a57597e 100644
44585 --- a/fs/nfs/blocklayout/blocklayout.c
44586 +++ b/fs/nfs/blocklayout/blocklayout.c
44587 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44588 */
44589 struct parallel_io {
44590 struct kref refcnt;
44591 - struct rpc_call_ops call_ops;
44592 + rpc_call_ops_no_const call_ops;
44593 void (*pnfs_callback) (void *data);
44594 void *data;
44595 };
44596 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44597 index 50a15fa..ca113f9 100644
44598 --- a/fs/nfs/inode.c
44599 +++ b/fs/nfs/inode.c
44600 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44601 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44602 nfsi->attrtimeo_timestamp = jiffies;
44603
44604 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44605 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44606 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44607 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44608 else
44609 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44610 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44611 }
44612
44613 -static atomic_long_t nfs_attr_generation_counter;
44614 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44615
44616 static unsigned long nfs_read_attr_generation_counter(void)
44617 {
44618 - return atomic_long_read(&nfs_attr_generation_counter);
44619 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44620 }
44621
44622 unsigned long nfs_inc_attr_generation_counter(void)
44623 {
44624 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44625 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44626 }
44627
44628 void nfs_fattr_init(struct nfs_fattr *fattr)
44629 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44630 index 7a2e442..8e544cc 100644
44631 --- a/fs/nfsd/vfs.c
44632 +++ b/fs/nfsd/vfs.c
44633 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44634 } else {
44635 oldfs = get_fs();
44636 set_fs(KERNEL_DS);
44637 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44638 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44639 set_fs(oldfs);
44640 }
44641
44642 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44643
44644 /* Write the data. */
44645 oldfs = get_fs(); set_fs(KERNEL_DS);
44646 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44647 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44648 set_fs(oldfs);
44649 if (host_err < 0)
44650 goto out_nfserr;
44651 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44652 */
44653
44654 oldfs = get_fs(); set_fs(KERNEL_DS);
44655 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44656 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44657 set_fs(oldfs);
44658
44659 if (host_err < 0)
44660 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44661 index 9fde1c0..14e8827 100644
44662 --- a/fs/notify/fanotify/fanotify_user.c
44663 +++ b/fs/notify/fanotify/fanotify_user.c
44664 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44665 goto out_close_fd;
44666
44667 ret = -EFAULT;
44668 - if (copy_to_user(buf, &fanotify_event_metadata,
44669 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44670 + copy_to_user(buf, &fanotify_event_metadata,
44671 fanotify_event_metadata.event_len))
44672 goto out_kill_access_response;
44673
44674 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44675 index ee18815..7aa5d01 100644
44676 --- a/fs/notify/notification.c
44677 +++ b/fs/notify/notification.c
44678 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
44679 * get set to 0 so it will never get 'freed'
44680 */
44681 static struct fsnotify_event *q_overflow_event;
44682 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44683 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44684
44685 /**
44686 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44687 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44688 */
44689 u32 fsnotify_get_cookie(void)
44690 {
44691 - return atomic_inc_return(&fsnotify_sync_cookie);
44692 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44693 }
44694 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44695
44696 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
44697 index 99e3610..02c1068 100644
44698 --- a/fs/ntfs/dir.c
44699 +++ b/fs/ntfs/dir.c
44700 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
44701 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44702 ~(s64)(ndir->itype.index.block_size - 1)));
44703 /* Bounds checks. */
44704 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44705 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44706 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44707 "inode 0x%lx or driver bug.", vdir->i_ino);
44708 goto err_out;
44709 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
44710 index c587e2d..3641eaa 100644
44711 --- a/fs/ntfs/file.c
44712 +++ b/fs/ntfs/file.c
44713 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
44714 #endif /* NTFS_RW */
44715 };
44716
44717 -const struct file_operations ntfs_empty_file_ops = {};
44718 +const struct file_operations ntfs_empty_file_ops __read_only;
44719
44720 -const struct inode_operations ntfs_empty_inode_ops = {};
44721 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44722 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
44723 index 210c352..a174f83 100644
44724 --- a/fs/ocfs2/localalloc.c
44725 +++ b/fs/ocfs2/localalloc.c
44726 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
44727 goto bail;
44728 }
44729
44730 - atomic_inc(&osb->alloc_stats.moves);
44731 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44732
44733 bail:
44734 if (handle)
44735 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
44736 index d355e6e..578d905 100644
44737 --- a/fs/ocfs2/ocfs2.h
44738 +++ b/fs/ocfs2/ocfs2.h
44739 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
44740
44741 struct ocfs2_alloc_stats
44742 {
44743 - atomic_t moves;
44744 - atomic_t local_data;
44745 - atomic_t bitmap_data;
44746 - atomic_t bg_allocs;
44747 - atomic_t bg_extends;
44748 + atomic_unchecked_t moves;
44749 + atomic_unchecked_t local_data;
44750 + atomic_unchecked_t bitmap_data;
44751 + atomic_unchecked_t bg_allocs;
44752 + atomic_unchecked_t bg_extends;
44753 };
44754
44755 enum ocfs2_local_alloc_state
44756 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
44757 index ba5d97e..c77db25 100644
44758 --- a/fs/ocfs2/suballoc.c
44759 +++ b/fs/ocfs2/suballoc.c
44760 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
44761 mlog_errno(status);
44762 goto bail;
44763 }
44764 - atomic_inc(&osb->alloc_stats.bg_extends);
44765 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44766
44767 /* You should never ask for this much metadata */
44768 BUG_ON(bits_wanted >
44769 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
44770 mlog_errno(status);
44771 goto bail;
44772 }
44773 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44774 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44775
44776 *suballoc_loc = res.sr_bg_blkno;
44777 *suballoc_bit_start = res.sr_bit_offset;
44778 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
44779 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44780 res->sr_bits);
44781
44782 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44783 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44784
44785 BUG_ON(res->sr_bits != 1);
44786
44787 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
44788 mlog_errno(status);
44789 goto bail;
44790 }
44791 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44792 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44793
44794 BUG_ON(res.sr_bits != 1);
44795
44796 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44797 cluster_start,
44798 num_clusters);
44799 if (!status)
44800 - atomic_inc(&osb->alloc_stats.local_data);
44801 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44802 } else {
44803 if (min_clusters > (osb->bitmap_cpg - 1)) {
44804 /* The only paths asking for contiguousness
44805 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44806 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44807 res.sr_bg_blkno,
44808 res.sr_bit_offset);
44809 - atomic_inc(&osb->alloc_stats.bitmap_data);
44810 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44811 *num_clusters = res.sr_bits;
44812 }
44813 }
44814 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
44815 index 4994f8b..eaab8eb 100644
44816 --- a/fs/ocfs2/super.c
44817 +++ b/fs/ocfs2/super.c
44818 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
44819 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44820 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44821 "Stats",
44822 - atomic_read(&osb->alloc_stats.bitmap_data),
44823 - atomic_read(&osb->alloc_stats.local_data),
44824 - atomic_read(&osb->alloc_stats.bg_allocs),
44825 - atomic_read(&osb->alloc_stats.moves),
44826 - atomic_read(&osb->alloc_stats.bg_extends));
44827 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44828 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44829 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44830 + atomic_read_unchecked(&osb->alloc_stats.moves),
44831 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44832
44833 out += snprintf(buf + out, len - out,
44834 "%10s => State: %u Descriptor: %llu Size: %u bits "
44835 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
44836 spin_lock_init(&osb->osb_xattr_lock);
44837 ocfs2_init_steal_slots(osb);
44838
44839 - atomic_set(&osb->alloc_stats.moves, 0);
44840 - atomic_set(&osb->alloc_stats.local_data, 0);
44841 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44842 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44843 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44844 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44845 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44846 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44847 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44848 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44849
44850 /* Copy the blockcheck stats from the superblock probe */
44851 osb->osb_ecc_stats = *stats;
44852 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
44853 index 5d22872..523db20 100644
44854 --- a/fs/ocfs2/symlink.c
44855 +++ b/fs/ocfs2/symlink.c
44856 @@ -142,7 +142,7 @@ bail:
44857
44858 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44859 {
44860 - char *link = nd_get_link(nd);
44861 + const char *link = nd_get_link(nd);
44862 if (!IS_ERR(link))
44863 kfree(link);
44864 }
44865 diff --git a/fs/open.c b/fs/open.c
44866 index 22c41b5..695cb17 100644
44867 --- a/fs/open.c
44868 +++ b/fs/open.c
44869 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
44870 error = locks_verify_truncate(inode, NULL, length);
44871 if (!error)
44872 error = security_path_truncate(&path);
44873 +
44874 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44875 + error = -EACCES;
44876 +
44877 if (!error)
44878 error = do_truncate(path.dentry, length, 0, NULL);
44879
44880 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
44881 if (__mnt_is_readonly(path.mnt))
44882 res = -EROFS;
44883
44884 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44885 + res = -EACCES;
44886 +
44887 out_path_release:
44888 path_put(&path);
44889 out:
44890 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
44891 if (error)
44892 goto dput_and_out;
44893
44894 + gr_log_chdir(path.dentry, path.mnt);
44895 +
44896 set_fs_pwd(current->fs, &path);
44897
44898 dput_and_out:
44899 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
44900 goto out_putf;
44901
44902 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44903 +
44904 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44905 + error = -EPERM;
44906 +
44907 + if (!error)
44908 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44909 +
44910 if (!error)
44911 set_fs_pwd(current->fs, &file->f_path);
44912 out_putf:
44913 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
44914 if (error)
44915 goto dput_and_out;
44916
44917 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44918 + goto dput_and_out;
44919 +
44920 set_fs_root(current->fs, &path);
44921 +
44922 + gr_handle_chroot_chdir(&path);
44923 +
44924 error = 0;
44925 dput_and_out:
44926 path_put(&path);
44927 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
44928 if (error)
44929 return error;
44930 mutex_lock(&inode->i_mutex);
44931 +
44932 + if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
44933 + error = -EACCES;
44934 + goto out_unlock;
44935 + }
44936 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
44937 + error = -EACCES;
44938 + goto out_unlock;
44939 + }
44940 +
44941 error = security_path_chmod(path->dentry, path->mnt, mode);
44942 if (error)
44943 goto out_unlock;
44944 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
44945 int error;
44946 struct iattr newattrs;
44947
44948 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
44949 + return -EACCES;
44950 +
44951 newattrs.ia_valid = ATTR_CTIME;
44952 if (user != (uid_t) -1) {
44953 newattrs.ia_valid |= ATTR_UID;
44954 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
44955 index 6296b40..417c00f 100644
44956 --- a/fs/partitions/efi.c
44957 +++ b/fs/partitions/efi.c
44958 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
44959 if (!gpt)
44960 return NULL;
44961
44962 + if (!le32_to_cpu(gpt->num_partition_entries))
44963 + return NULL;
44964 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
44965 + if (!pte)
44966 + return NULL;
44967 +
44968 count = le32_to_cpu(gpt->num_partition_entries) *
44969 le32_to_cpu(gpt->sizeof_partition_entry);
44970 - if (!count)
44971 - return NULL;
44972 - pte = kzalloc(count, GFP_KERNEL);
44973 - if (!pte)
44974 - return NULL;
44975 -
44976 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
44977 (u8 *) pte,
44978 count) < count) {
44979 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
44980 index bd8ae78..539d250 100644
44981 --- a/fs/partitions/ldm.c
44982 +++ b/fs/partitions/ldm.c
44983 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
44984 goto found;
44985 }
44986
44987 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44988 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44989 if (!f) {
44990 ldm_crit ("Out of memory.");
44991 return false;
44992 diff --git a/fs/pipe.c b/fs/pipe.c
44993 index 4065f07..68c0706 100644
44994 --- a/fs/pipe.c
44995 +++ b/fs/pipe.c
44996 @@ -420,9 +420,9 @@ redo:
44997 }
44998 if (bufs) /* More to do? */
44999 continue;
45000 - if (!pipe->writers)
45001 + if (!atomic_read(&pipe->writers))
45002 break;
45003 - if (!pipe->waiting_writers) {
45004 + if (!atomic_read(&pipe->waiting_writers)) {
45005 /* syscall merging: Usually we must not sleep
45006 * if O_NONBLOCK is set, or if we got some data.
45007 * But if a writer sleeps in kernel space, then
45008 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45009 mutex_lock(&inode->i_mutex);
45010 pipe = inode->i_pipe;
45011
45012 - if (!pipe->readers) {
45013 + if (!atomic_read(&pipe->readers)) {
45014 send_sig(SIGPIPE, current, 0);
45015 ret = -EPIPE;
45016 goto out;
45017 @@ -530,7 +530,7 @@ redo1:
45018 for (;;) {
45019 int bufs;
45020
45021 - if (!pipe->readers) {
45022 + if (!atomic_read(&pipe->readers)) {
45023 send_sig(SIGPIPE, current, 0);
45024 if (!ret)
45025 ret = -EPIPE;
45026 @@ -616,9 +616,9 @@ redo2:
45027 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45028 do_wakeup = 0;
45029 }
45030 - pipe->waiting_writers++;
45031 + atomic_inc(&pipe->waiting_writers);
45032 pipe_wait(pipe);
45033 - pipe->waiting_writers--;
45034 + atomic_dec(&pipe->waiting_writers);
45035 }
45036 out:
45037 mutex_unlock(&inode->i_mutex);
45038 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45039 mask = 0;
45040 if (filp->f_mode & FMODE_READ) {
45041 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45042 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45043 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45044 mask |= POLLHUP;
45045 }
45046
45047 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45048 * Most Unices do not set POLLERR for FIFOs but on Linux they
45049 * behave exactly like pipes for poll().
45050 */
45051 - if (!pipe->readers)
45052 + if (!atomic_read(&pipe->readers))
45053 mask |= POLLERR;
45054 }
45055
45056 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45057
45058 mutex_lock(&inode->i_mutex);
45059 pipe = inode->i_pipe;
45060 - pipe->readers -= decr;
45061 - pipe->writers -= decw;
45062 + atomic_sub(decr, &pipe->readers);
45063 + atomic_sub(decw, &pipe->writers);
45064
45065 - if (!pipe->readers && !pipe->writers) {
45066 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45067 free_pipe_info(inode);
45068 } else {
45069 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45070 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45071
45072 if (inode->i_pipe) {
45073 ret = 0;
45074 - inode->i_pipe->readers++;
45075 + atomic_inc(&inode->i_pipe->readers);
45076 }
45077
45078 mutex_unlock(&inode->i_mutex);
45079 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45080
45081 if (inode->i_pipe) {
45082 ret = 0;
45083 - inode->i_pipe->writers++;
45084 + atomic_inc(&inode->i_pipe->writers);
45085 }
45086
45087 mutex_unlock(&inode->i_mutex);
45088 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45089 if (inode->i_pipe) {
45090 ret = 0;
45091 if (filp->f_mode & FMODE_READ)
45092 - inode->i_pipe->readers++;
45093 + atomic_inc(&inode->i_pipe->readers);
45094 if (filp->f_mode & FMODE_WRITE)
45095 - inode->i_pipe->writers++;
45096 + atomic_inc(&inode->i_pipe->writers);
45097 }
45098
45099 mutex_unlock(&inode->i_mutex);
45100 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45101 inode->i_pipe = NULL;
45102 }
45103
45104 -static struct vfsmount *pipe_mnt __read_mostly;
45105 +struct vfsmount *pipe_mnt __read_mostly;
45106
45107 /*
45108 * pipefs_dname() is called from d_path().
45109 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45110 goto fail_iput;
45111 inode->i_pipe = pipe;
45112
45113 - pipe->readers = pipe->writers = 1;
45114 + atomic_set(&pipe->readers, 1);
45115 + atomic_set(&pipe->writers, 1);
45116 inode->i_fop = &rdwr_pipefifo_fops;
45117
45118 /*
45119 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45120 index 15af622..0e9f4467 100644
45121 --- a/fs/proc/Kconfig
45122 +++ b/fs/proc/Kconfig
45123 @@ -30,12 +30,12 @@ config PROC_FS
45124
45125 config PROC_KCORE
45126 bool "/proc/kcore support" if !ARM
45127 - depends on PROC_FS && MMU
45128 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45129
45130 config PROC_VMCORE
45131 bool "/proc/vmcore support"
45132 - depends on PROC_FS && CRASH_DUMP
45133 - default y
45134 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45135 + default n
45136 help
45137 Exports the dump image of crashed kernel in ELF format.
45138
45139 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45140 limited in memory.
45141
45142 config PROC_PAGE_MONITOR
45143 - default y
45144 - depends on PROC_FS && MMU
45145 + default n
45146 + depends on PROC_FS && MMU && !GRKERNSEC
45147 bool "Enable /proc page monitoring" if EXPERT
45148 help
45149 Various /proc files exist to monitor process memory utilization:
45150 diff --git a/fs/proc/array.c b/fs/proc/array.c
45151 index 3a1dafd..d41fc37 100644
45152 --- a/fs/proc/array.c
45153 +++ b/fs/proc/array.c
45154 @@ -60,6 +60,7 @@
45155 #include <linux/tty.h>
45156 #include <linux/string.h>
45157 #include <linux/mman.h>
45158 +#include <linux/grsecurity.h>
45159 #include <linux/proc_fs.h>
45160 #include <linux/ioport.h>
45161 #include <linux/uaccess.h>
45162 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45163 seq_putc(m, '\n');
45164 }
45165
45166 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45167 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45168 +{
45169 + if (p->mm)
45170 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45171 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45172 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45173 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45174 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45175 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45176 + else
45177 + seq_printf(m, "PaX:\t-----\n");
45178 +}
45179 +#endif
45180 +
45181 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45182 struct pid *pid, struct task_struct *task)
45183 {
45184 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45185 task_cpus_allowed(m, task);
45186 cpuset_task_status_allowed(m, task);
45187 task_context_switch_counts(m, task);
45188 +
45189 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45190 + task_pax(m, task);
45191 +#endif
45192 +
45193 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45194 + task_grsec_rbac(m, task);
45195 +#endif
45196 +
45197 return 0;
45198 }
45199
45200 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45201 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45202 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45203 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45204 +#endif
45205 +
45206 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45207 struct pid *pid, struct task_struct *task, int whole)
45208 {
45209 @@ -449,6 +480,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45210 gtime = task->gtime;
45211 }
45212
45213 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45214 + if (PAX_RAND_FLAGS(mm)) {
45215 + eip = 0;
45216 + esp = 0;
45217 + wchan = 0;
45218 + }
45219 +#endif
45220 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45221 + wchan = 0;
45222 + eip =0;
45223 + esp =0;
45224 +#endif
45225 +
45226 /* scale priority and nice values from timeslices to -20..20 */
45227 /* to make it look like a "normal" Unix priority/nice value */
45228 priority = task_prio(task);
45229 @@ -489,9 +533,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45230 vsize,
45231 mm ? get_mm_rss(mm) : 0,
45232 rsslim,
45233 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45234 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45235 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45236 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45237 +#else
45238 mm ? (permitted ? mm->start_code : 1) : 0,
45239 mm ? (permitted ? mm->end_code : 1) : 0,
45240 (permitted && mm) ? mm->start_stack : 0,
45241 +#endif
45242 esp,
45243 eip,
45244 /* The signal information here is obsolete.
45245 @@ -544,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45246
45247 return 0;
45248 }
45249 +
45250 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45251 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45252 +{
45253 + u32 curr_ip = 0;
45254 + unsigned long flags;
45255 +
45256 + if (lock_task_sighand(task, &flags)) {
45257 + curr_ip = task->signal->curr_ip;
45258 + unlock_task_sighand(task, &flags);
45259 + }
45260 +
45261 + return sprintf(buffer, "%pI4\n", &curr_ip);
45262 +}
45263 +#endif
45264 diff --git a/fs/proc/base.c b/fs/proc/base.c
45265 index 1fc1dca..357b933 100644
45266 --- a/fs/proc/base.c
45267 +++ b/fs/proc/base.c
45268 @@ -107,6 +107,22 @@ struct pid_entry {
45269 union proc_op op;
45270 };
45271
45272 +struct getdents_callback {
45273 + struct linux_dirent __user * current_dir;
45274 + struct linux_dirent __user * previous;
45275 + struct file * file;
45276 + int count;
45277 + int error;
45278 +};
45279 +
45280 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45281 + loff_t offset, u64 ino, unsigned int d_type)
45282 +{
45283 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45284 + buf->error = -EINVAL;
45285 + return 0;
45286 +}
45287 +
45288 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45289 .name = (NAME), \
45290 .len = sizeof(NAME) - 1, \
45291 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45292 return result;
45293 }
45294
45295 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45296 -{
45297 - struct mm_struct *mm;
45298 - int err;
45299 -
45300 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45301 - if (err)
45302 - return ERR_PTR(err);
45303 -
45304 - mm = get_task_mm(task);
45305 - if (mm && mm != current->mm &&
45306 - !ptrace_may_access(task, mode)) {
45307 - mmput(mm);
45308 - mm = ERR_PTR(-EACCES);
45309 - }
45310 - mutex_unlock(&task->signal->cred_guard_mutex);
45311 -
45312 - return mm;
45313 -}
45314 -
45315 struct mm_struct *mm_for_maps(struct task_struct *task)
45316 {
45317 return mm_access(task, PTRACE_MODE_READ);
45318 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45319 if (!mm->arg_end)
45320 goto out_mm; /* Shh! No looking before we're done */
45321
45322 + if (gr_acl_handle_procpidmem(task))
45323 + goto out_mm;
45324 +
45325 len = mm->arg_end - mm->arg_start;
45326
45327 if (len > PAGE_SIZE)
45328 @@ -256,12 +255,28 @@ out:
45329 return res;
45330 }
45331
45332 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45333 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45334 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45335 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45336 +#endif
45337 +
45338 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45339 {
45340 struct mm_struct *mm = mm_for_maps(task);
45341 int res = PTR_ERR(mm);
45342 if (mm && !IS_ERR(mm)) {
45343 unsigned int nwords = 0;
45344 +
45345 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45346 + /* allow if we're currently ptracing this task */
45347 + if (PAX_RAND_FLAGS(mm) &&
45348 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45349 + mmput(mm);
45350 + return 0;
45351 + }
45352 +#endif
45353 +
45354 do {
45355 nwords += 2;
45356 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45357 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45358 }
45359
45360
45361 -#ifdef CONFIG_KALLSYMS
45362 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45363 /*
45364 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45365 * Returns the resolved symbol. If that fails, simply return the address.
45366 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45367 mutex_unlock(&task->signal->cred_guard_mutex);
45368 }
45369
45370 -#ifdef CONFIG_STACKTRACE
45371 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45372
45373 #define MAX_STACK_TRACE_DEPTH 64
45374
45375 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45376 return count;
45377 }
45378
45379 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45380 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45381 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45382 {
45383 long nr;
45384 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45385 /************************************************************************/
45386
45387 /* permission checks */
45388 -static int proc_fd_access_allowed(struct inode *inode)
45389 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45390 {
45391 struct task_struct *task;
45392 int allowed = 0;
45393 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45394 */
45395 task = get_proc_task(inode);
45396 if (task) {
45397 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45398 + if (log)
45399 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45400 + else
45401 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45402 put_task_struct(task);
45403 }
45404 return allowed;
45405 @@ -775,6 +793,13 @@ static int mem_open(struct inode* inode, struct file* file)
45406 if (IS_ERR(mm))
45407 return PTR_ERR(mm);
45408
45409 + if (mm) {
45410 + /* ensure this mm_struct can't be freed */
45411 + atomic_inc(&mm->mm_count);
45412 + /* but do not pin its memory */
45413 + mmput(mm);
45414 + }
45415 +
45416 /* OK to pass negative loff_t, we can catch out-of-range */
45417 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45418 file->private_data = mm;
45419 @@ -782,57 +807,18 @@ static int mem_open(struct inode* inode, struct file* file)
45420 return 0;
45421 }
45422
45423 -static ssize_t mem_read(struct file * file, char __user * buf,
45424 - size_t count, loff_t *ppos)
45425 +static ssize_t mem_rw(struct file *file, char __user *buf,
45426 + size_t count, loff_t *ppos, int write)
45427 {
45428 - int ret;
45429 - char *page;
45430 - unsigned long src = *ppos;
45431 struct mm_struct *mm = file->private_data;
45432 -
45433 - if (!mm)
45434 - return 0;
45435 -
45436 - page = (char *)__get_free_page(GFP_TEMPORARY);
45437 - if (!page)
45438 - return -ENOMEM;
45439 -
45440 - ret = 0;
45441 -
45442 - while (count > 0) {
45443 - int this_len, retval;
45444 -
45445 - this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
45446 - retval = access_remote_vm(mm, src, page, this_len, 0);
45447 - if (!retval) {
45448 - if (!ret)
45449 - ret = -EIO;
45450 - break;
45451 - }
45452 -
45453 - if (copy_to_user(buf, page, retval)) {
45454 - ret = -EFAULT;
45455 - break;
45456 - }
45457 -
45458 - ret += retval;
45459 - src += retval;
45460 - buf += retval;
45461 - count -= retval;
45462 - }
45463 - *ppos = src;
45464 -
45465 - free_page((unsigned long) page);
45466 - return ret;
45467 -}
45468 -
45469 -static ssize_t mem_write(struct file * file, const char __user *buf,
45470 - size_t count, loff_t *ppos)
45471 -{
45472 - int copied;
45473 + unsigned long addr = *ppos;
45474 + ssize_t copied;
45475 char *page;
45476 - unsigned long dst = *ppos;
45477 - struct mm_struct *mm = file->private_data;
45478 +
45479 +#ifdef CONFIG_GRKERNSEC
45480 + if (write)
45481 + return -EPERM;
45482 +#endif
45483
45484 if (!mm)
45485 return 0;
45486 @@ -842,31 +828,54 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
45487 return -ENOMEM;
45488
45489 copied = 0;
45490 + if (!atomic_inc_not_zero(&mm->mm_users))
45491 + goto free;
45492 +
45493 while (count > 0) {
45494 - int this_len, retval;
45495 + int this_len = min_t(int, count, PAGE_SIZE);
45496
45497 - this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
45498 - if (copy_from_user(page, buf, this_len)) {
45499 + if (write && copy_from_user(page, buf, this_len)) {
45500 copied = -EFAULT;
45501 break;
45502 }
45503 - retval = access_remote_vm(mm, dst, page, this_len, 1);
45504 - if (!retval) {
45505 +
45506 + this_len = access_remote_vm(mm, addr, page, this_len, write);
45507 + if (!this_len) {
45508 if (!copied)
45509 copied = -EIO;
45510 break;
45511 }
45512 - copied += retval;
45513 - buf += retval;
45514 - dst += retval;
45515 - count -= retval;
45516 +
45517 + if (!write && copy_to_user(buf, page, this_len)) {
45518 + copied = -EFAULT;
45519 + break;
45520 + }
45521 +
45522 + buf += this_len;
45523 + addr += this_len;
45524 + copied += this_len;
45525 + count -= this_len;
45526 }
45527 - *ppos = dst;
45528 + *ppos = addr;
45529
45530 + mmput(mm);
45531 +free:
45532 free_page((unsigned long) page);
45533 return copied;
45534 }
45535
45536 +static ssize_t mem_read(struct file *file, char __user *buf,
45537 + size_t count, loff_t *ppos)
45538 +{
45539 + return mem_rw(file, buf, count, ppos, 0);
45540 +}
45541 +
45542 +static ssize_t mem_write(struct file *file, const char __user *buf,
45543 + size_t count, loff_t *ppos)
45544 +{
45545 + return mem_rw(file, (char __user*)buf, count, ppos, 1);
45546 +}
45547 +
45548 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45549 {
45550 switch (orig) {
45551 @@ -886,8 +895,8 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45552 static int mem_release(struct inode *inode, struct file *file)
45553 {
45554 struct mm_struct *mm = file->private_data;
45555 -
45556 - mmput(mm);
45557 + if (mm)
45558 + mmdrop(mm);
45559 return 0;
45560 }
45561
45562 @@ -911,6 +920,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45563 if (!task)
45564 goto out_no_task;
45565
45566 + if (gr_acl_handle_procpidmem(task))
45567 + goto out;
45568 +
45569 ret = -ENOMEM;
45570 page = (char *)__get_free_page(GFP_TEMPORARY);
45571 if (!page)
45572 @@ -1533,7 +1545,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45573 path_put(&nd->path);
45574
45575 /* Are we allowed to snoop on the tasks file descriptors? */
45576 - if (!proc_fd_access_allowed(inode))
45577 + if (!proc_fd_access_allowed(inode,0))
45578 goto out;
45579
45580 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45581 @@ -1572,8 +1584,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45582 struct path path;
45583
45584 /* Are we allowed to snoop on the tasks file descriptors? */
45585 - if (!proc_fd_access_allowed(inode))
45586 - goto out;
45587 + /* logging this is needed for learning on chromium to work properly,
45588 + but we don't want to flood the logs from 'ps' which does a readlink
45589 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45590 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45591 + */
45592 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45593 + if (!proc_fd_access_allowed(inode,0))
45594 + goto out;
45595 + } else {
45596 + if (!proc_fd_access_allowed(inode,1))
45597 + goto out;
45598 + }
45599
45600 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45601 if (error)
45602 @@ -1638,7 +1660,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45603 rcu_read_lock();
45604 cred = __task_cred(task);
45605 inode->i_uid = cred->euid;
45606 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45607 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45608 +#else
45609 inode->i_gid = cred->egid;
45610 +#endif
45611 rcu_read_unlock();
45612 }
45613 security_task_to_inode(task, inode);
45614 @@ -1656,6 +1682,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45615 struct inode *inode = dentry->d_inode;
45616 struct task_struct *task;
45617 const struct cred *cred;
45618 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45619 + const struct cred *tmpcred = current_cred();
45620 +#endif
45621
45622 generic_fillattr(inode, stat);
45623
45624 @@ -1663,13 +1692,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45625 stat->uid = 0;
45626 stat->gid = 0;
45627 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45628 +
45629 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45630 + rcu_read_unlock();
45631 + return -ENOENT;
45632 + }
45633 +
45634 if (task) {
45635 + cred = __task_cred(task);
45636 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45637 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45638 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45639 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45640 +#endif
45641 + ) {
45642 +#endif
45643 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45644 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45645 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45646 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45647 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45648 +#endif
45649 task_dumpable(task)) {
45650 - cred = __task_cred(task);
45651 stat->uid = cred->euid;
45652 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45653 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45654 +#else
45655 stat->gid = cred->egid;
45656 +#endif
45657 }
45658 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45659 + } else {
45660 + rcu_read_unlock();
45661 + return -ENOENT;
45662 + }
45663 +#endif
45664 }
45665 rcu_read_unlock();
45666 return 0;
45667 @@ -1706,11 +1763,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45668
45669 if (task) {
45670 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45671 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45672 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45673 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45674 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45675 +#endif
45676 task_dumpable(task)) {
45677 rcu_read_lock();
45678 cred = __task_cred(task);
45679 inode->i_uid = cred->euid;
45680 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45681 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45682 +#else
45683 inode->i_gid = cred->egid;
45684 +#endif
45685 rcu_read_unlock();
45686 } else {
45687 inode->i_uid = 0;
45688 @@ -1828,7 +1894,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45689 int fd = proc_fd(inode);
45690
45691 if (task) {
45692 - files = get_files_struct(task);
45693 + if (!gr_acl_handle_procpidmem(task))
45694 + files = get_files_struct(task);
45695 put_task_struct(task);
45696 }
45697 if (files) {
45698 @@ -2096,11 +2163,21 @@ static const struct file_operations proc_fd_operations = {
45699 */
45700 static int proc_fd_permission(struct inode *inode, int mask)
45701 {
45702 + struct task_struct *task;
45703 int rv = generic_permission(inode, mask);
45704 - if (rv == 0)
45705 - return 0;
45706 +
45707 if (task_pid(current) == proc_pid(inode))
45708 rv = 0;
45709 +
45710 + task = get_proc_task(inode);
45711 + if (task == NULL)
45712 + return rv;
45713 +
45714 + if (gr_acl_handle_procpidmem(task))
45715 + rv = -EACCES;
45716 +
45717 + put_task_struct(task);
45718 +
45719 return rv;
45720 }
45721
45722 @@ -2210,6 +2287,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45723 if (!task)
45724 goto out_no_task;
45725
45726 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45727 + goto out;
45728 +
45729 /*
45730 * Yes, it does not scale. And it should not. Don't add
45731 * new entries into /proc/<tgid>/ without very good reasons.
45732 @@ -2254,6 +2334,9 @@ static int proc_pident_readdir(struct file *filp,
45733 if (!task)
45734 goto out_no_task;
45735
45736 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45737 + goto out;
45738 +
45739 ret = 0;
45740 i = filp->f_pos;
45741 switch (i) {
45742 @@ -2524,7 +2607,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45743 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45744 void *cookie)
45745 {
45746 - char *s = nd_get_link(nd);
45747 + const char *s = nd_get_link(nd);
45748 if (!IS_ERR(s))
45749 __putname(s);
45750 }
45751 @@ -2722,7 +2805,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45752 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45753 #endif
45754 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45755 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45756 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45757 INF("syscall", S_IRUGO, proc_pid_syscall),
45758 #endif
45759 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45760 @@ -2747,10 +2830,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45761 #ifdef CONFIG_SECURITY
45762 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45763 #endif
45764 -#ifdef CONFIG_KALLSYMS
45765 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45766 INF("wchan", S_IRUGO, proc_pid_wchan),
45767 #endif
45768 -#ifdef CONFIG_STACKTRACE
45769 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45770 ONE("stack", S_IRUGO, proc_pid_stack),
45771 #endif
45772 #ifdef CONFIG_SCHEDSTATS
45773 @@ -2784,6 +2867,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45774 #ifdef CONFIG_HARDWALL
45775 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45776 #endif
45777 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45778 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45779 +#endif
45780 };
45781
45782 static int proc_tgid_base_readdir(struct file * filp,
45783 @@ -2909,7 +2995,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45784 if (!inode)
45785 goto out;
45786
45787 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45788 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45789 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45790 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45791 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45792 +#else
45793 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45794 +#endif
45795 inode->i_op = &proc_tgid_base_inode_operations;
45796 inode->i_fop = &proc_tgid_base_operations;
45797 inode->i_flags|=S_IMMUTABLE;
45798 @@ -2951,7 +3044,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45799 if (!task)
45800 goto out;
45801
45802 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45803 + goto out_put_task;
45804 +
45805 result = proc_pid_instantiate(dir, dentry, task, NULL);
45806 +out_put_task:
45807 put_task_struct(task);
45808 out:
45809 return result;
45810 @@ -3016,6 +3113,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45811 {
45812 unsigned int nr;
45813 struct task_struct *reaper;
45814 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45815 + const struct cred *tmpcred = current_cred();
45816 + const struct cred *itercred;
45817 +#endif
45818 + filldir_t __filldir = filldir;
45819 struct tgid_iter iter;
45820 struct pid_namespace *ns;
45821
45822 @@ -3039,8 +3141,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45823 for (iter = next_tgid(ns, iter);
45824 iter.task;
45825 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45826 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45827 + rcu_read_lock();
45828 + itercred = __task_cred(iter.task);
45829 +#endif
45830 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45831 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45832 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45833 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45834 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45835 +#endif
45836 + )
45837 +#endif
45838 + )
45839 + __filldir = &gr_fake_filldir;
45840 + else
45841 + __filldir = filldir;
45842 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45843 + rcu_read_unlock();
45844 +#endif
45845 filp->f_pos = iter.tgid + TGID_OFFSET;
45846 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45847 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45848 put_task_struct(iter.task);
45849 goto out;
45850 }
45851 @@ -3068,7 +3189,7 @@ static const struct pid_entry tid_base_stuff[] = {
45852 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45853 #endif
45854 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45855 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45856 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45857 INF("syscall", S_IRUGO, proc_pid_syscall),
45858 #endif
45859 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45860 @@ -3092,10 +3213,10 @@ static const struct pid_entry tid_base_stuff[] = {
45861 #ifdef CONFIG_SECURITY
45862 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45863 #endif
45864 -#ifdef CONFIG_KALLSYMS
45865 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45866 INF("wchan", S_IRUGO, proc_pid_wchan),
45867 #endif
45868 -#ifdef CONFIG_STACKTRACE
45869 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45870 ONE("stack", S_IRUGO, proc_pid_stack),
45871 #endif
45872 #ifdef CONFIG_SCHEDSTATS
45873 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
45874 index 82676e3..5f8518a 100644
45875 --- a/fs/proc/cmdline.c
45876 +++ b/fs/proc/cmdline.c
45877 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
45878
45879 static int __init proc_cmdline_init(void)
45880 {
45881 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45882 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45883 +#else
45884 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45885 +#endif
45886 return 0;
45887 }
45888 module_init(proc_cmdline_init);
45889 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
45890 index b143471..bb105e5 100644
45891 --- a/fs/proc/devices.c
45892 +++ b/fs/proc/devices.c
45893 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
45894
45895 static int __init proc_devices_init(void)
45896 {
45897 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45898 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45899 +#else
45900 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45901 +#endif
45902 return 0;
45903 }
45904 module_init(proc_devices_init);
45905 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
45906 index 7737c54..7172574 100644
45907 --- a/fs/proc/inode.c
45908 +++ b/fs/proc/inode.c
45909 @@ -18,12 +18,18 @@
45910 #include <linux/module.h>
45911 #include <linux/sysctl.h>
45912 #include <linux/slab.h>
45913 +#include <linux/grsecurity.h>
45914
45915 #include <asm/system.h>
45916 #include <asm/uaccess.h>
45917
45918 #include "internal.h"
45919
45920 +#ifdef CONFIG_PROC_SYSCTL
45921 +extern const struct inode_operations proc_sys_inode_operations;
45922 +extern const struct inode_operations proc_sys_dir_operations;
45923 +#endif
45924 +
45925 static void proc_evict_inode(struct inode *inode)
45926 {
45927 struct proc_dir_entry *de;
45928 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
45929 ns_ops = PROC_I(inode)->ns_ops;
45930 if (ns_ops && ns_ops->put)
45931 ns_ops->put(PROC_I(inode)->ns);
45932 +
45933 +#ifdef CONFIG_PROC_SYSCTL
45934 + if (inode->i_op == &proc_sys_inode_operations ||
45935 + inode->i_op == &proc_sys_dir_operations)
45936 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45937 +#endif
45938 +
45939 }
45940
45941 static struct kmem_cache * proc_inode_cachep;
45942 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
45943 if (de->mode) {
45944 inode->i_mode = de->mode;
45945 inode->i_uid = de->uid;
45946 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45947 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45948 +#else
45949 inode->i_gid = de->gid;
45950 +#endif
45951 }
45952 if (de->size)
45953 inode->i_size = de->size;
45954 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
45955 index 7838e5c..ff92cbc 100644
45956 --- a/fs/proc/internal.h
45957 +++ b/fs/proc/internal.h
45958 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45959 struct pid *pid, struct task_struct *task);
45960 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45961 struct pid *pid, struct task_struct *task);
45962 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45963 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45964 +#endif
45965 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45966
45967 extern const struct file_operations proc_maps_operations;
45968 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
45969 index d245cb2..f4e8498 100644
45970 --- a/fs/proc/kcore.c
45971 +++ b/fs/proc/kcore.c
45972 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45973 * the addresses in the elf_phdr on our list.
45974 */
45975 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45976 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45977 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45978 + if (tsz > buflen)
45979 tsz = buflen;
45980 -
45981 +
45982 while (buflen) {
45983 struct kcore_list *m;
45984
45985 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45986 kfree(elf_buf);
45987 } else {
45988 if (kern_addr_valid(start)) {
45989 - unsigned long n;
45990 + char *elf_buf;
45991 + mm_segment_t oldfs;
45992
45993 - n = copy_to_user(buffer, (char *)start, tsz);
45994 - /*
45995 - * We cannot distingush between fault on source
45996 - * and fault on destination. When this happens
45997 - * we clear too and hope it will trigger the
45998 - * EFAULT again.
45999 - */
46000 - if (n) {
46001 - if (clear_user(buffer + tsz - n,
46002 - n))
46003 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46004 + if (!elf_buf)
46005 + return -ENOMEM;
46006 + oldfs = get_fs();
46007 + set_fs(KERNEL_DS);
46008 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46009 + set_fs(oldfs);
46010 + if (copy_to_user(buffer, elf_buf, tsz)) {
46011 + kfree(elf_buf);
46012 return -EFAULT;
46013 + }
46014 }
46015 + set_fs(oldfs);
46016 + kfree(elf_buf);
46017 } else {
46018 if (clear_user(buffer, tsz))
46019 return -EFAULT;
46020 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46021
46022 static int open_kcore(struct inode *inode, struct file *filp)
46023 {
46024 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46025 + return -EPERM;
46026 +#endif
46027 if (!capable(CAP_SYS_RAWIO))
46028 return -EPERM;
46029 if (kcore_need_update)
46030 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46031 index 80e4645..53e5fcf 100644
46032 --- a/fs/proc/meminfo.c
46033 +++ b/fs/proc/meminfo.c
46034 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46035 vmi.used >> 10,
46036 vmi.largest_chunk >> 10
46037 #ifdef CONFIG_MEMORY_FAILURE
46038 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46039 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46040 #endif
46041 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46042 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46043 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46044 index b1822dd..df622cb 100644
46045 --- a/fs/proc/nommu.c
46046 +++ b/fs/proc/nommu.c
46047 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46048 if (len < 1)
46049 len = 1;
46050 seq_printf(m, "%*c", len, ' ');
46051 - seq_path(m, &file->f_path, "");
46052 + seq_path(m, &file->f_path, "\n\\");
46053 }
46054
46055 seq_putc(m, '\n');
46056 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46057 index f738024..876984a 100644
46058 --- a/fs/proc/proc_net.c
46059 +++ b/fs/proc/proc_net.c
46060 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46061 struct task_struct *task;
46062 struct nsproxy *ns;
46063 struct net *net = NULL;
46064 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46065 + const struct cred *cred = current_cred();
46066 +#endif
46067 +
46068 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46069 + if (cred->fsuid)
46070 + return net;
46071 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46072 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46073 + return net;
46074 +#endif
46075
46076 rcu_read_lock();
46077 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46078 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46079 index a6b6217..1e0579d 100644
46080 --- a/fs/proc/proc_sysctl.c
46081 +++ b/fs/proc/proc_sysctl.c
46082 @@ -9,11 +9,13 @@
46083 #include <linux/namei.h>
46084 #include "internal.h"
46085
46086 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46087 +
46088 static const struct dentry_operations proc_sys_dentry_operations;
46089 static const struct file_operations proc_sys_file_operations;
46090 -static const struct inode_operations proc_sys_inode_operations;
46091 +const struct inode_operations proc_sys_inode_operations;
46092 static const struct file_operations proc_sys_dir_file_operations;
46093 -static const struct inode_operations proc_sys_dir_operations;
46094 +const struct inode_operations proc_sys_dir_operations;
46095
46096 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46097 {
46098 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46099
46100 err = NULL;
46101 d_set_d_op(dentry, &proc_sys_dentry_operations);
46102 +
46103 + gr_handle_proc_create(dentry, inode);
46104 +
46105 d_add(dentry, inode);
46106
46107 + if (gr_handle_sysctl(p, MAY_EXEC))
46108 + err = ERR_PTR(-ENOENT);
46109 +
46110 out:
46111 sysctl_head_finish(head);
46112 return err;
46113 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46114 if (!table->proc_handler)
46115 goto out;
46116
46117 +#ifdef CONFIG_GRKERNSEC
46118 + error = -EPERM;
46119 + if (write && !capable(CAP_SYS_ADMIN))
46120 + goto out;
46121 +#endif
46122 +
46123 /* careful: calling conventions are nasty here */
46124 res = count;
46125 error = table->proc_handler(table, write, buf, &res, ppos);
46126 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46127 return -ENOMEM;
46128 } else {
46129 d_set_d_op(child, &proc_sys_dentry_operations);
46130 +
46131 + gr_handle_proc_create(child, inode);
46132 +
46133 d_add(child, inode);
46134 }
46135 } else {
46136 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46137 if (*pos < file->f_pos)
46138 continue;
46139
46140 + if (gr_handle_sysctl(table, 0))
46141 + continue;
46142 +
46143 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46144 if (res)
46145 return res;
46146 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46147 if (IS_ERR(head))
46148 return PTR_ERR(head);
46149
46150 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46151 + return -ENOENT;
46152 +
46153 generic_fillattr(inode, stat);
46154 if (table)
46155 stat->mode = (stat->mode & S_IFMT) | table->mode;
46156 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46157 .llseek = generic_file_llseek,
46158 };
46159
46160 -static const struct inode_operations proc_sys_inode_operations = {
46161 +const struct inode_operations proc_sys_inode_operations = {
46162 .permission = proc_sys_permission,
46163 .setattr = proc_sys_setattr,
46164 .getattr = proc_sys_getattr,
46165 };
46166
46167 -static const struct inode_operations proc_sys_dir_operations = {
46168 +const struct inode_operations proc_sys_dir_operations = {
46169 .lookup = proc_sys_lookup,
46170 .permission = proc_sys_permission,
46171 .setattr = proc_sys_setattr,
46172 diff --git a/fs/proc/root.c b/fs/proc/root.c
46173 index 03102d9..4ae347e 100644
46174 --- a/fs/proc/root.c
46175 +++ b/fs/proc/root.c
46176 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46177 #ifdef CONFIG_PROC_DEVICETREE
46178 proc_device_tree_init();
46179 #endif
46180 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46181 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46182 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46183 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46184 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46185 +#endif
46186 +#else
46187 proc_mkdir("bus", NULL);
46188 +#endif
46189 proc_sys_init();
46190 }
46191
46192 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46193 index 7dcd2a2..d1d9cb6 100644
46194 --- a/fs/proc/task_mmu.c
46195 +++ b/fs/proc/task_mmu.c
46196 @@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46197 "VmExe:\t%8lu kB\n"
46198 "VmLib:\t%8lu kB\n"
46199 "VmPTE:\t%8lu kB\n"
46200 - "VmSwap:\t%8lu kB\n",
46201 - hiwater_vm << (PAGE_SHIFT-10),
46202 + "VmSwap:\t%8lu kB\n"
46203 +
46204 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46205 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46206 +#endif
46207 +
46208 + ,hiwater_vm << (PAGE_SHIFT-10),
46209 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46210 mm->locked_vm << (PAGE_SHIFT-10),
46211 mm->pinned_vm << (PAGE_SHIFT-10),
46212 @@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46213 data << (PAGE_SHIFT-10),
46214 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46215 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46216 - swap << (PAGE_SHIFT-10));
46217 + swap << (PAGE_SHIFT-10)
46218 +
46219 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46220 + , mm->context.user_cs_base, mm->context.user_cs_limit
46221 +#endif
46222 +
46223 + );
46224 }
46225
46226 unsigned long task_vsize(struct mm_struct *mm)
46227 @@ -209,6 +220,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46228 return ret;
46229 }
46230
46231 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46232 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46233 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46234 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46235 +#endif
46236 +
46237 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46238 {
46239 struct mm_struct *mm = vma->vm_mm;
46240 @@ -227,13 +244,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46241 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46242 }
46243
46244 - /* We don't show the stack guard page in /proc/maps */
46245 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46246 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46247 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46248 +#else
46249 start = vma->vm_start;
46250 - if (stack_guard_page_start(vma, start))
46251 - start += PAGE_SIZE;
46252 end = vma->vm_end;
46253 - if (stack_guard_page_end(vma, end))
46254 - end -= PAGE_SIZE;
46255 +#endif
46256
46257 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46258 start,
46259 @@ -242,7 +259,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46260 flags & VM_WRITE ? 'w' : '-',
46261 flags & VM_EXEC ? 'x' : '-',
46262 flags & VM_MAYSHARE ? 's' : 'p',
46263 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46264 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46265 +#else
46266 pgoff,
46267 +#endif
46268 MAJOR(dev), MINOR(dev), ino, &len);
46269
46270 /*
46271 @@ -251,7 +272,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46272 */
46273 if (file) {
46274 pad_len_spaces(m, len);
46275 - seq_path(m, &file->f_path, "\n");
46276 + seq_path(m, &file->f_path, "\n\\");
46277 } else {
46278 const char *name = arch_vma_name(vma);
46279 if (!name) {
46280 @@ -259,8 +280,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46281 if (vma->vm_start <= mm->brk &&
46282 vma->vm_end >= mm->start_brk) {
46283 name = "[heap]";
46284 - } else if (vma->vm_start <= mm->start_stack &&
46285 - vma->vm_end >= mm->start_stack) {
46286 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46287 + (vma->vm_start <= mm->start_stack &&
46288 + vma->vm_end >= mm->start_stack)) {
46289 name = "[stack]";
46290 }
46291 } else {
46292 @@ -435,11 +457,16 @@ static int show_smap(struct seq_file *m, void *v)
46293 };
46294
46295 memset(&mss, 0, sizeof mss);
46296 - mss.vma = vma;
46297 - /* mmap_sem is held in m_start */
46298 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46299 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46300 -
46301 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46302 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46303 +#endif
46304 + mss.vma = vma;
46305 + /* mmap_sem is held in m_start */
46306 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46307 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46308 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46309 + }
46310 +#endif
46311 show_map_vma(m, vma);
46312
46313 seq_printf(m,
46314 @@ -457,7 +484,11 @@ static int show_smap(struct seq_file *m, void *v)
46315 "KernelPageSize: %8lu kB\n"
46316 "MMUPageSize: %8lu kB\n"
46317 "Locked: %8lu kB\n",
46318 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46319 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46320 +#else
46321 (vma->vm_end - vma->vm_start) >> 10,
46322 +#endif
46323 mss.resident >> 10,
46324 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46325 mss.shared_clean >> 10,
46326 @@ -1036,7 +1067,7 @@ static int show_numa_map(struct seq_file *m, void *v)
46327
46328 if (file) {
46329 seq_printf(m, " file=");
46330 - seq_path(m, &file->f_path, "\n\t= ");
46331 + seq_path(m, &file->f_path, "\n\t\\= ");
46332 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46333 seq_printf(m, " heap");
46334 } else if (vma->vm_start <= mm->start_stack &&
46335 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46336 index 980de54..2a4db5f 100644
46337 --- a/fs/proc/task_nommu.c
46338 +++ b/fs/proc/task_nommu.c
46339 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46340 else
46341 bytes += kobjsize(mm);
46342
46343 - if (current->fs && current->fs->users > 1)
46344 + if (current->fs && atomic_read(&current->fs->users) > 1)
46345 sbytes += kobjsize(current->fs);
46346 else
46347 bytes += kobjsize(current->fs);
46348 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46349
46350 if (file) {
46351 pad_len_spaces(m, len);
46352 - seq_path(m, &file->f_path, "");
46353 + seq_path(m, &file->f_path, "\n\\");
46354 } else if (mm) {
46355 if (vma->vm_start <= mm->start_stack &&
46356 vma->vm_end >= mm->start_stack) {
46357 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46358 index d67908b..d13f6a6 100644
46359 --- a/fs/quota/netlink.c
46360 +++ b/fs/quota/netlink.c
46361 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46362 void quota_send_warning(short type, unsigned int id, dev_t dev,
46363 const char warntype)
46364 {
46365 - static atomic_t seq;
46366 + static atomic_unchecked_t seq;
46367 struct sk_buff *skb;
46368 void *msg_head;
46369 int ret;
46370 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46371 "VFS: Not enough memory to send quota warning.\n");
46372 return;
46373 }
46374 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46375 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46376 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46377 if (!msg_head) {
46378 printk(KERN_ERR
46379 diff --git a/fs/readdir.c b/fs/readdir.c
46380 index 356f715..c918d38 100644
46381 --- a/fs/readdir.c
46382 +++ b/fs/readdir.c
46383 @@ -17,6 +17,7 @@
46384 #include <linux/security.h>
46385 #include <linux/syscalls.h>
46386 #include <linux/unistd.h>
46387 +#include <linux/namei.h>
46388
46389 #include <asm/uaccess.h>
46390
46391 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46392
46393 struct readdir_callback {
46394 struct old_linux_dirent __user * dirent;
46395 + struct file * file;
46396 int result;
46397 };
46398
46399 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46400 buf->result = -EOVERFLOW;
46401 return -EOVERFLOW;
46402 }
46403 +
46404 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46405 + return 0;
46406 +
46407 buf->result++;
46408 dirent = buf->dirent;
46409 if (!access_ok(VERIFY_WRITE, dirent,
46410 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46411
46412 buf.result = 0;
46413 buf.dirent = dirent;
46414 + buf.file = file;
46415
46416 error = vfs_readdir(file, fillonedir, &buf);
46417 if (buf.result)
46418 @@ -142,6 +149,7 @@ struct linux_dirent {
46419 struct getdents_callback {
46420 struct linux_dirent __user * current_dir;
46421 struct linux_dirent __user * previous;
46422 + struct file * file;
46423 int count;
46424 int error;
46425 };
46426 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46427 buf->error = -EOVERFLOW;
46428 return -EOVERFLOW;
46429 }
46430 +
46431 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46432 + return 0;
46433 +
46434 dirent = buf->previous;
46435 if (dirent) {
46436 if (__put_user(offset, &dirent->d_off))
46437 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46438 buf.previous = NULL;
46439 buf.count = count;
46440 buf.error = 0;
46441 + buf.file = file;
46442
46443 error = vfs_readdir(file, filldir, &buf);
46444 if (error >= 0)
46445 @@ -229,6 +242,7 @@ out:
46446 struct getdents_callback64 {
46447 struct linux_dirent64 __user * current_dir;
46448 struct linux_dirent64 __user * previous;
46449 + struct file *file;
46450 int count;
46451 int error;
46452 };
46453 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46454 buf->error = -EINVAL; /* only used if we fail.. */
46455 if (reclen > buf->count)
46456 return -EINVAL;
46457 +
46458 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46459 + return 0;
46460 +
46461 dirent = buf->previous;
46462 if (dirent) {
46463 if (__put_user(offset, &dirent->d_off))
46464 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46465
46466 buf.current_dir = dirent;
46467 buf.previous = NULL;
46468 + buf.file = file;
46469 buf.count = count;
46470 buf.error = 0;
46471
46472 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46473 error = buf.error;
46474 lastdirent = buf.previous;
46475 if (lastdirent) {
46476 - typeof(lastdirent->d_off) d_off = file->f_pos;
46477 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46478 if (__put_user(d_off, &lastdirent->d_off))
46479 error = -EFAULT;
46480 else
46481 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46482 index 60c0804..d814f98 100644
46483 --- a/fs/reiserfs/do_balan.c
46484 +++ b/fs/reiserfs/do_balan.c
46485 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46486 return;
46487 }
46488
46489 - atomic_inc(&(fs_generation(tb->tb_sb)));
46490 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46491 do_balance_starts(tb);
46492
46493 /* balance leaf returns 0 except if combining L R and S into
46494 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46495 index 7a99811..a7c96c4 100644
46496 --- a/fs/reiserfs/procfs.c
46497 +++ b/fs/reiserfs/procfs.c
46498 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46499 "SMALL_TAILS " : "NO_TAILS ",
46500 replay_only(sb) ? "REPLAY_ONLY " : "",
46501 convert_reiserfs(sb) ? "CONV " : "",
46502 - atomic_read(&r->s_generation_counter),
46503 + atomic_read_unchecked(&r->s_generation_counter),
46504 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46505 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46506 SF(s_good_search_by_key_reada), SF(s_bmaps),
46507 diff --git a/fs/select.c b/fs/select.c
46508 index d33418f..2a5345e 100644
46509 --- a/fs/select.c
46510 +++ b/fs/select.c
46511 @@ -20,6 +20,7 @@
46512 #include <linux/module.h>
46513 #include <linux/slab.h>
46514 #include <linux/poll.h>
46515 +#include <linux/security.h>
46516 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46517 #include <linux/file.h>
46518 #include <linux/fdtable.h>
46519 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46520 struct poll_list *walk = head;
46521 unsigned long todo = nfds;
46522
46523 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46524 if (nfds > rlimit(RLIMIT_NOFILE))
46525 return -EINVAL;
46526
46527 diff --git a/fs/seq_file.c b/fs/seq_file.c
46528 index dba43c3..a99fb63 100644
46529 --- a/fs/seq_file.c
46530 +++ b/fs/seq_file.c
46531 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46532 return 0;
46533 }
46534 if (!m->buf) {
46535 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46536 + m->size = PAGE_SIZE;
46537 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46538 if (!m->buf)
46539 return -ENOMEM;
46540 }
46541 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46542 Eoverflow:
46543 m->op->stop(m, p);
46544 kfree(m->buf);
46545 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46546 + m->size <<= 1;
46547 + m->buf = kmalloc(m->size, GFP_KERNEL);
46548 return !m->buf ? -ENOMEM : -EAGAIN;
46549 }
46550
46551 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46552 m->version = file->f_version;
46553 /* grab buffer if we didn't have one */
46554 if (!m->buf) {
46555 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46556 + m->size = PAGE_SIZE;
46557 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46558 if (!m->buf)
46559 goto Enomem;
46560 }
46561 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46562 goto Fill;
46563 m->op->stop(m, p);
46564 kfree(m->buf);
46565 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46566 + m->size <<= 1;
46567 + m->buf = kmalloc(m->size, GFP_KERNEL);
46568 if (!m->buf)
46569 goto Enomem;
46570 m->count = 0;
46571 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
46572 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46573 void *data)
46574 {
46575 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46576 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46577 int res = -ENOMEM;
46578
46579 if (op) {
46580 diff --git a/fs/splice.c b/fs/splice.c
46581 index fa2defa..8601650 100644
46582 --- a/fs/splice.c
46583 +++ b/fs/splice.c
46584 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46585 pipe_lock(pipe);
46586
46587 for (;;) {
46588 - if (!pipe->readers) {
46589 + if (!atomic_read(&pipe->readers)) {
46590 send_sig(SIGPIPE, current, 0);
46591 if (!ret)
46592 ret = -EPIPE;
46593 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46594 do_wakeup = 0;
46595 }
46596
46597 - pipe->waiting_writers++;
46598 + atomic_inc(&pipe->waiting_writers);
46599 pipe_wait(pipe);
46600 - pipe->waiting_writers--;
46601 + atomic_dec(&pipe->waiting_writers);
46602 }
46603
46604 pipe_unlock(pipe);
46605 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46606 old_fs = get_fs();
46607 set_fs(get_ds());
46608 /* The cast to a user pointer is valid due to the set_fs() */
46609 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46610 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46611 set_fs(old_fs);
46612
46613 return res;
46614 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46615 old_fs = get_fs();
46616 set_fs(get_ds());
46617 /* The cast to a user pointer is valid due to the set_fs() */
46618 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46619 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46620 set_fs(old_fs);
46621
46622 return res;
46623 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46624 goto err;
46625
46626 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46627 - vec[i].iov_base = (void __user *) page_address(page);
46628 + vec[i].iov_base = (void __force_user *) page_address(page);
46629 vec[i].iov_len = this_len;
46630 spd.pages[i] = page;
46631 spd.nr_pages++;
46632 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46633 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46634 {
46635 while (!pipe->nrbufs) {
46636 - if (!pipe->writers)
46637 + if (!atomic_read(&pipe->writers))
46638 return 0;
46639
46640 - if (!pipe->waiting_writers && sd->num_spliced)
46641 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46642 return 0;
46643
46644 if (sd->flags & SPLICE_F_NONBLOCK)
46645 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46646 * out of the pipe right after the splice_to_pipe(). So set
46647 * PIPE_READERS appropriately.
46648 */
46649 - pipe->readers = 1;
46650 + atomic_set(&pipe->readers, 1);
46651
46652 current->splice_pipe = pipe;
46653 }
46654 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46655 ret = -ERESTARTSYS;
46656 break;
46657 }
46658 - if (!pipe->writers)
46659 + if (!atomic_read(&pipe->writers))
46660 break;
46661 - if (!pipe->waiting_writers) {
46662 + if (!atomic_read(&pipe->waiting_writers)) {
46663 if (flags & SPLICE_F_NONBLOCK) {
46664 ret = -EAGAIN;
46665 break;
46666 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46667 pipe_lock(pipe);
46668
46669 while (pipe->nrbufs >= pipe->buffers) {
46670 - if (!pipe->readers) {
46671 + if (!atomic_read(&pipe->readers)) {
46672 send_sig(SIGPIPE, current, 0);
46673 ret = -EPIPE;
46674 break;
46675 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46676 ret = -ERESTARTSYS;
46677 break;
46678 }
46679 - pipe->waiting_writers++;
46680 + atomic_inc(&pipe->waiting_writers);
46681 pipe_wait(pipe);
46682 - pipe->waiting_writers--;
46683 + atomic_dec(&pipe->waiting_writers);
46684 }
46685
46686 pipe_unlock(pipe);
46687 @@ -1819,14 +1819,14 @@ retry:
46688 pipe_double_lock(ipipe, opipe);
46689
46690 do {
46691 - if (!opipe->readers) {
46692 + if (!atomic_read(&opipe->readers)) {
46693 send_sig(SIGPIPE, current, 0);
46694 if (!ret)
46695 ret = -EPIPE;
46696 break;
46697 }
46698
46699 - if (!ipipe->nrbufs && !ipipe->writers)
46700 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46701 break;
46702
46703 /*
46704 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46705 pipe_double_lock(ipipe, opipe);
46706
46707 do {
46708 - if (!opipe->readers) {
46709 + if (!atomic_read(&opipe->readers)) {
46710 send_sig(SIGPIPE, current, 0);
46711 if (!ret)
46712 ret = -EPIPE;
46713 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46714 * return EAGAIN if we have the potential of some data in the
46715 * future, otherwise just return 0
46716 */
46717 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46718 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46719 ret = -EAGAIN;
46720
46721 pipe_unlock(ipipe);
46722 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
46723 index 7fdf6a7..e6cd8ad 100644
46724 --- a/fs/sysfs/dir.c
46725 +++ b/fs/sysfs/dir.c
46726 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
46727 struct sysfs_dirent *sd;
46728 int rc;
46729
46730 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46731 + const char *parent_name = parent_sd->s_name;
46732 +
46733 + mode = S_IFDIR | S_IRWXU;
46734 +
46735 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
46736 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
46737 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
46738 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
46739 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
46740 +#endif
46741 +
46742 /* allocate */
46743 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
46744 if (!sd)
46745 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46746 index 779789a..f58193c 100644
46747 --- a/fs/sysfs/file.c
46748 +++ b/fs/sysfs/file.c
46749 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46750
46751 struct sysfs_open_dirent {
46752 atomic_t refcnt;
46753 - atomic_t event;
46754 + atomic_unchecked_t event;
46755 wait_queue_head_t poll;
46756 struct list_head buffers; /* goes through sysfs_buffer.list */
46757 };
46758 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46759 if (!sysfs_get_active(attr_sd))
46760 return -ENODEV;
46761
46762 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46763 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46764 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46765
46766 sysfs_put_active(attr_sd);
46767 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46768 return -ENOMEM;
46769
46770 atomic_set(&new_od->refcnt, 0);
46771 - atomic_set(&new_od->event, 1);
46772 + atomic_set_unchecked(&new_od->event, 1);
46773 init_waitqueue_head(&new_od->poll);
46774 INIT_LIST_HEAD(&new_od->buffers);
46775 goto retry;
46776 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46777
46778 sysfs_put_active(attr_sd);
46779
46780 - if (buffer->event != atomic_read(&od->event))
46781 + if (buffer->event != atomic_read_unchecked(&od->event))
46782 goto trigger;
46783
46784 return DEFAULT_POLLMASK;
46785 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46786
46787 od = sd->s_attr.open;
46788 if (od) {
46789 - atomic_inc(&od->event);
46790 + atomic_inc_unchecked(&od->event);
46791 wake_up_interruptible(&od->poll);
46792 }
46793
46794 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46795 index a7ac78f..02158e1 100644
46796 --- a/fs/sysfs/symlink.c
46797 +++ b/fs/sysfs/symlink.c
46798 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46799
46800 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46801 {
46802 - char *page = nd_get_link(nd);
46803 + const char *page = nd_get_link(nd);
46804 if (!IS_ERR(page))
46805 free_page((unsigned long)page);
46806 }
46807 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46808 index c175b4d..8f36a16 100644
46809 --- a/fs/udf/misc.c
46810 +++ b/fs/udf/misc.c
46811 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46812
46813 u8 udf_tag_checksum(const struct tag *t)
46814 {
46815 - u8 *data = (u8 *)t;
46816 + const u8 *data = (const u8 *)t;
46817 u8 checksum = 0;
46818 int i;
46819 for (i = 0; i < sizeof(struct tag); ++i)
46820 diff --git a/fs/utimes.c b/fs/utimes.c
46821 index ba653f3..06ea4b1 100644
46822 --- a/fs/utimes.c
46823 +++ b/fs/utimes.c
46824 @@ -1,6 +1,7 @@
46825 #include <linux/compiler.h>
46826 #include <linux/file.h>
46827 #include <linux/fs.h>
46828 +#include <linux/security.h>
46829 #include <linux/linkage.h>
46830 #include <linux/mount.h>
46831 #include <linux/namei.h>
46832 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
46833 goto mnt_drop_write_and_out;
46834 }
46835 }
46836 +
46837 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46838 + error = -EACCES;
46839 + goto mnt_drop_write_and_out;
46840 + }
46841 +
46842 mutex_lock(&inode->i_mutex);
46843 error = notify_change(path->dentry, &newattrs);
46844 mutex_unlock(&inode->i_mutex);
46845 diff --git a/fs/xattr.c b/fs/xattr.c
46846 index 67583de..c5aad14 100644
46847 --- a/fs/xattr.c
46848 +++ b/fs/xattr.c
46849 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46850 * Extended attribute SET operations
46851 */
46852 static long
46853 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46854 +setxattr(struct path *path, const char __user *name, const void __user *value,
46855 size_t size, int flags)
46856 {
46857 int error;
46858 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
46859 return PTR_ERR(kvalue);
46860 }
46861
46862 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46863 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46864 + error = -EACCES;
46865 + goto out;
46866 + }
46867 +
46868 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46869 +out:
46870 kfree(kvalue);
46871 return error;
46872 }
46873 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
46874 return error;
46875 error = mnt_want_write(path.mnt);
46876 if (!error) {
46877 - error = setxattr(path.dentry, name, value, size, flags);
46878 + error = setxattr(&path, name, value, size, flags);
46879 mnt_drop_write(path.mnt);
46880 }
46881 path_put(&path);
46882 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
46883 return error;
46884 error = mnt_want_write(path.mnt);
46885 if (!error) {
46886 - error = setxattr(path.dentry, name, value, size, flags);
46887 + error = setxattr(&path, name, value, size, flags);
46888 mnt_drop_write(path.mnt);
46889 }
46890 path_put(&path);
46891 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
46892 const void __user *,value, size_t, size, int, flags)
46893 {
46894 struct file *f;
46895 - struct dentry *dentry;
46896 int error = -EBADF;
46897
46898 f = fget(fd);
46899 if (!f)
46900 return error;
46901 - dentry = f->f_path.dentry;
46902 - audit_inode(NULL, dentry);
46903 + audit_inode(NULL, f->f_path.dentry);
46904 error = mnt_want_write_file(f);
46905 if (!error) {
46906 - error = setxattr(dentry, name, value, size, flags);
46907 + error = setxattr(&f->f_path, name, value, size, flags);
46908 mnt_drop_write(f->f_path.mnt);
46909 }
46910 fput(f);
46911 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
46912 index 8d5a506..7f62712 100644
46913 --- a/fs/xattr_acl.c
46914 +++ b/fs/xattr_acl.c
46915 @@ -17,8 +17,8 @@
46916 struct posix_acl *
46917 posix_acl_from_xattr(const void *value, size_t size)
46918 {
46919 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46920 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46921 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46922 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46923 int count;
46924 struct posix_acl *acl;
46925 struct posix_acl_entry *acl_e;
46926 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
46927 index d0ab788..827999b 100644
46928 --- a/fs/xfs/xfs_bmap.c
46929 +++ b/fs/xfs/xfs_bmap.c
46930 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
46931 int nmap,
46932 int ret_nmap);
46933 #else
46934 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46935 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46936 #endif /* DEBUG */
46937
46938 STATIC int
46939 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
46940 index 79d05e8..e3e5861 100644
46941 --- a/fs/xfs/xfs_dir2_sf.c
46942 +++ b/fs/xfs/xfs_dir2_sf.c
46943 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
46944 }
46945
46946 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
46947 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46948 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46949 + char name[sfep->namelen];
46950 + memcpy(name, sfep->name, sfep->namelen);
46951 + if (filldir(dirent, name, sfep->namelen,
46952 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46953 + *offset = off & 0x7fffffff;
46954 + return 0;
46955 + }
46956 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46957 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46958 *offset = off & 0x7fffffff;
46959 return 0;
46960 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
46961 index d99a905..9f88202 100644
46962 --- a/fs/xfs/xfs_ioctl.c
46963 +++ b/fs/xfs/xfs_ioctl.c
46964 @@ -128,7 +128,7 @@ xfs_find_handle(
46965 }
46966
46967 error = -EFAULT;
46968 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46969 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46970 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46971 goto out_put;
46972
46973 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
46974 index 23ce927..e274cc1 100644
46975 --- a/fs/xfs/xfs_iops.c
46976 +++ b/fs/xfs/xfs_iops.c
46977 @@ -447,7 +447,7 @@ xfs_vn_put_link(
46978 struct nameidata *nd,
46979 void *p)
46980 {
46981 - char *s = nd_get_link(nd);
46982 + const char *s = nd_get_link(nd);
46983
46984 if (!IS_ERR(s))
46985 kfree(s);
46986 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
46987 new file mode 100644
46988 index 0000000..dfd3d34
46989 --- /dev/null
46990 +++ b/grsecurity/Kconfig
46991 @@ -0,0 +1,1069 @@
46992 +#
46993 +# grecurity configuration
46994 +#
46995 +
46996 +menu "Grsecurity"
46997 +
46998 +config GRKERNSEC
46999 + bool "Grsecurity"
47000 + select CRYPTO
47001 + select CRYPTO_SHA256
47002 + help
47003 + If you say Y here, you will be able to configure many features
47004 + that will enhance the security of your system. It is highly
47005 + recommended that you say Y here and read through the help
47006 + for each option so that you fully understand the features and
47007 + can evaluate their usefulness for your machine.
47008 +
47009 +choice
47010 + prompt "Security Level"
47011 + depends on GRKERNSEC
47012 + default GRKERNSEC_CUSTOM
47013 +
47014 +config GRKERNSEC_LOW
47015 + bool "Low"
47016 + select GRKERNSEC_LINK
47017 + select GRKERNSEC_FIFO
47018 + select GRKERNSEC_RANDNET
47019 + select GRKERNSEC_DMESG
47020 + select GRKERNSEC_CHROOT
47021 + select GRKERNSEC_CHROOT_CHDIR
47022 +
47023 + help
47024 + If you choose this option, several of the grsecurity options will
47025 + be enabled that will give you greater protection against a number
47026 + of attacks, while assuring that none of your software will have any
47027 + conflicts with the additional security measures. If you run a lot
47028 + of unusual software, or you are having problems with the higher
47029 + security levels, you should say Y here. With this option, the
47030 + following features are enabled:
47031 +
47032 + - Linking restrictions
47033 + - FIFO restrictions
47034 + - Restricted dmesg
47035 + - Enforced chdir("/") on chroot
47036 + - Runtime module disabling
47037 +
47038 +config GRKERNSEC_MEDIUM
47039 + bool "Medium"
47040 + select PAX
47041 + select PAX_EI_PAX
47042 + select PAX_PT_PAX_FLAGS
47043 + select PAX_HAVE_ACL_FLAGS
47044 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47045 + select GRKERNSEC_CHROOT
47046 + select GRKERNSEC_CHROOT_SYSCTL
47047 + select GRKERNSEC_LINK
47048 + select GRKERNSEC_FIFO
47049 + select GRKERNSEC_DMESG
47050 + select GRKERNSEC_RANDNET
47051 + select GRKERNSEC_FORKFAIL
47052 + select GRKERNSEC_TIME
47053 + select GRKERNSEC_SIGNAL
47054 + select GRKERNSEC_CHROOT
47055 + select GRKERNSEC_CHROOT_UNIX
47056 + select GRKERNSEC_CHROOT_MOUNT
47057 + select GRKERNSEC_CHROOT_PIVOT
47058 + select GRKERNSEC_CHROOT_DOUBLE
47059 + select GRKERNSEC_CHROOT_CHDIR
47060 + select GRKERNSEC_CHROOT_MKNOD
47061 + select GRKERNSEC_PROC
47062 + select GRKERNSEC_PROC_USERGROUP
47063 + select PAX_RANDUSTACK
47064 + select PAX_ASLR
47065 + select PAX_RANDMMAP
47066 + select PAX_REFCOUNT if (X86 || SPARC64)
47067 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47068 +
47069 + help
47070 + If you say Y here, several features in addition to those included
47071 + in the low additional security level will be enabled. These
47072 + features provide even more security to your system, though in rare
47073 + cases they may be incompatible with very old or poorly written
47074 + software. If you enable this option, make sure that your auth
47075 + service (identd) is running as gid 1001. With this option,
47076 + the following features (in addition to those provided in the
47077 + low additional security level) will be enabled:
47078 +
47079 + - Failed fork logging
47080 + - Time change logging
47081 + - Signal logging
47082 + - Deny mounts in chroot
47083 + - Deny double chrooting
47084 + - Deny sysctl writes in chroot
47085 + - Deny mknod in chroot
47086 + - Deny access to abstract AF_UNIX sockets out of chroot
47087 + - Deny pivot_root in chroot
47088 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47089 + - /proc restrictions with special GID set to 10 (usually wheel)
47090 + - Address Space Layout Randomization (ASLR)
47091 + - Prevent exploitation of most refcount overflows
47092 + - Bounds checking of copying between the kernel and userland
47093 +
47094 +config GRKERNSEC_HIGH
47095 + bool "High"
47096 + select GRKERNSEC_LINK
47097 + select GRKERNSEC_FIFO
47098 + select GRKERNSEC_DMESG
47099 + select GRKERNSEC_FORKFAIL
47100 + select GRKERNSEC_TIME
47101 + select GRKERNSEC_SIGNAL
47102 + select GRKERNSEC_CHROOT
47103 + select GRKERNSEC_CHROOT_SHMAT
47104 + select GRKERNSEC_CHROOT_UNIX
47105 + select GRKERNSEC_CHROOT_MOUNT
47106 + select GRKERNSEC_CHROOT_FCHDIR
47107 + select GRKERNSEC_CHROOT_PIVOT
47108 + select GRKERNSEC_CHROOT_DOUBLE
47109 + select GRKERNSEC_CHROOT_CHDIR
47110 + select GRKERNSEC_CHROOT_MKNOD
47111 + select GRKERNSEC_CHROOT_CAPS
47112 + select GRKERNSEC_CHROOT_SYSCTL
47113 + select GRKERNSEC_CHROOT_FINDTASK
47114 + select GRKERNSEC_SYSFS_RESTRICT
47115 + select GRKERNSEC_PROC
47116 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47117 + select GRKERNSEC_HIDESYM
47118 + select GRKERNSEC_BRUTE
47119 + select GRKERNSEC_PROC_USERGROUP
47120 + select GRKERNSEC_KMEM
47121 + select GRKERNSEC_RESLOG
47122 + select GRKERNSEC_RANDNET
47123 + select GRKERNSEC_PROC_ADD
47124 + select GRKERNSEC_CHROOT_CHMOD
47125 + select GRKERNSEC_CHROOT_NICE
47126 + select GRKERNSEC_SETXID
47127 + select GRKERNSEC_AUDIT_MOUNT
47128 + select GRKERNSEC_MODHARDEN if (MODULES)
47129 + select GRKERNSEC_HARDEN_PTRACE
47130 + select GRKERNSEC_PTRACE_READEXEC
47131 + select GRKERNSEC_VM86 if (X86_32)
47132 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47133 + select PAX
47134 + select PAX_RANDUSTACK
47135 + select PAX_ASLR
47136 + select PAX_RANDMMAP
47137 + select PAX_NOEXEC
47138 + select PAX_MPROTECT
47139 + select PAX_EI_PAX
47140 + select PAX_PT_PAX_FLAGS
47141 + select PAX_HAVE_ACL_FLAGS
47142 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47143 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47144 + select PAX_RANDKSTACK if (X86_TSC && X86)
47145 + select PAX_SEGMEXEC if (X86_32)
47146 + select PAX_PAGEEXEC
47147 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47148 + select PAX_EMUTRAMP if (PARISC)
47149 + select PAX_EMUSIGRT if (PARISC)
47150 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47151 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47152 + select PAX_REFCOUNT if (X86 || SPARC64)
47153 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47154 + help
47155 + If you say Y here, many of the features of grsecurity will be
47156 + enabled, which will protect you against many kinds of attacks
47157 + against your system. The heightened security comes at a cost
47158 + of an increased chance of incompatibilities with rare software
47159 + on your machine. Since this security level enables PaX, you should
47160 + view <http://pax.grsecurity.net> and read about the PaX
47161 + project. While you are there, download chpax and run it on
47162 + binaries that cause problems with PaX. Also remember that
47163 + since the /proc restrictions are enabled, you must run your
47164 + identd as gid 1001. This security level enables the following
47165 + features in addition to those listed in the low and medium
47166 + security levels:
47167 +
47168 + - Additional /proc restrictions
47169 + - Chmod restrictions in chroot
47170 + - No signals, ptrace, or viewing of processes outside of chroot
47171 + - Capability restrictions in chroot
47172 + - Deny fchdir out of chroot
47173 + - Priority restrictions in chroot
47174 + - Segmentation-based implementation of PaX
47175 + - Mprotect restrictions
47176 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47177 + - Kernel stack randomization
47178 + - Mount/unmount/remount logging
47179 + - Kernel symbol hiding
47180 + - Hardening of module auto-loading
47181 + - Ptrace restrictions
47182 + - Restricted vm86 mode
47183 + - Restricted sysfs/debugfs
47184 + - Active kernel exploit response
47185 +
47186 +config GRKERNSEC_CUSTOM
47187 + bool "Custom"
47188 + help
47189 + If you say Y here, you will be able to configure every grsecurity
47190 + option, which allows you to enable many more features that aren't
47191 + covered in the basic security levels. These additional features
47192 + include TPE, socket restrictions, and the sysctl system for
47193 + grsecurity. It is advised that you read through the help for
47194 + each option to determine its usefulness in your situation.
47195 +
47196 +endchoice
47197 +
47198 +menu "Address Space Protection"
47199 +depends on GRKERNSEC
47200 +
47201 +config GRKERNSEC_KMEM
47202 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47203 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47204 + help
47205 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47206 + be written to or read from to modify or leak the contents of the running
47207 + kernel. /dev/port will also not be allowed to be opened. If you have module
47208 + support disabled, enabling this will close up four ways that are
47209 + currently used to insert malicious code into the running kernel.
47210 + Even with all these features enabled, we still highly recommend that
47211 + you use the RBAC system, as it is still possible for an attacker to
47212 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47213 + If you are not using XFree86, you may be able to stop this additional
47214 + case by enabling the 'Disable privileged I/O' option. Though nothing
47215 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47216 + but only to video memory, which is the only writing we allow in this
47217 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47218 + not be allowed to mprotect it with PROT_WRITE later.
47219 + It is highly recommended that you say Y here if you meet all the
47220 + conditions above.
47221 +
47222 +config GRKERNSEC_VM86
47223 + bool "Restrict VM86 mode"
47224 + depends on X86_32
47225 +
47226 + help
47227 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47228 + make use of a special execution mode on 32bit x86 processors called
47229 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47230 + video cards and will still work with this option enabled. The purpose
47231 + of the option is to prevent exploitation of emulation errors in
47232 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47233 + Nearly all users should be able to enable this option.
47234 +
47235 +config GRKERNSEC_IO
47236 + bool "Disable privileged I/O"
47237 + depends on X86
47238 + select RTC_CLASS
47239 + select RTC_INTF_DEV
47240 + select RTC_DRV_CMOS
47241 +
47242 + help
47243 + If you say Y here, all ioperm and iopl calls will return an error.
47244 + Ioperm and iopl can be used to modify the running kernel.
47245 + Unfortunately, some programs need this access to operate properly,
47246 + the most notable of which are XFree86 and hwclock. hwclock can be
47247 + remedied by having RTC support in the kernel, so real-time
47248 + clock support is enabled if this option is enabled, to ensure
47249 + that hwclock operates correctly. XFree86 still will not
47250 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47251 + IF YOU USE XFree86. If you use XFree86 and you still want to
47252 + protect your kernel against modification, use the RBAC system.
47253 +
47254 +config GRKERNSEC_PROC_MEMMAP
47255 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47256 + default y if (PAX_NOEXEC || PAX_ASLR)
47257 + depends on PAX_NOEXEC || PAX_ASLR
47258 + help
47259 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47260 + give no information about the addresses of its mappings if
47261 + PaX features that rely on random addresses are enabled on the task.
47262 + If you use PaX it is greatly recommended that you say Y here as it
47263 + closes up a hole that makes the full ASLR useless for suid
47264 + binaries.
47265 +
47266 +config GRKERNSEC_BRUTE
47267 + bool "Deter exploit bruteforcing"
47268 + help
47269 + If you say Y here, attempts to bruteforce exploits against forking
47270 + daemons such as apache or sshd, as well as against suid/sgid binaries
47271 + will be deterred. When a child of a forking daemon is killed by PaX
47272 + or crashes due to an illegal instruction or other suspicious signal,
47273 + the parent process will be delayed 30 seconds upon every subsequent
47274 + fork until the administrator is able to assess the situation and
47275 + restart the daemon.
47276 + In the suid/sgid case, the attempt is logged, the user has all their
47277 + processes terminated, and they are prevented from executing any further
47278 + processes for 15 minutes.
47279 + It is recommended that you also enable signal logging in the auditing
47280 + section so that logs are generated when a process triggers a suspicious
47281 + signal.
47282 + If the sysctl option is enabled, a sysctl option with name
47283 + "deter_bruteforce" is created.
47284 +
47285 +
47286 +config GRKERNSEC_MODHARDEN
47287 + bool "Harden module auto-loading"
47288 + depends on MODULES
47289 + help
47290 + If you say Y here, module auto-loading in response to use of some
47291 + feature implemented by an unloaded module will be restricted to
47292 + root users. Enabling this option helps defend against attacks
47293 + by unprivileged users who abuse the auto-loading behavior to
47294 + cause a vulnerable module to load that is then exploited.
47295 +
47296 + If this option prevents a legitimate use of auto-loading for a
47297 + non-root user, the administrator can execute modprobe manually
47298 + with the exact name of the module mentioned in the alert log.
47299 + Alternatively, the administrator can add the module to the list
47300 + of modules loaded at boot by modifying init scripts.
47301 +
47302 + Modification of init scripts will most likely be needed on
47303 + Ubuntu servers with encrypted home directory support enabled,
47304 + as the first non-root user logging in will cause the ecb(aes),
47305 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47306 +
47307 +config GRKERNSEC_HIDESYM
47308 + bool "Hide kernel symbols"
47309 + help
47310 + If you say Y here, getting information on loaded modules, and
47311 + displaying all kernel symbols through a syscall will be restricted
47312 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47313 + /proc/kallsyms will be restricted to the root user. The RBAC
47314 + system can hide that entry even from root.
47315 +
47316 + This option also prevents leaking of kernel addresses through
47317 + several /proc entries.
47318 +
47319 + Note that this option is only effective provided the following
47320 + conditions are met:
47321 + 1) The kernel using grsecurity is not precompiled by some distribution
47322 + 2) You have also enabled GRKERNSEC_DMESG
47323 + 3) You are using the RBAC system and hiding other files such as your
47324 + kernel image and System.map. Alternatively, enabling this option
47325 + causes the permissions on /boot, /lib/modules, and the kernel
47326 + source directory to change at compile time to prevent
47327 + reading by non-root users.
47328 + If the above conditions are met, this option will aid in providing a
47329 + useful protection against local kernel exploitation of overflows
47330 + and arbitrary read/write vulnerabilities.
47331 +
47332 +config GRKERNSEC_KERN_LOCKOUT
47333 + bool "Active kernel exploit response"
47334 + depends on X86 || ARM || PPC || SPARC
47335 + help
47336 + If you say Y here, when a PaX alert is triggered due to suspicious
47337 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47338 + or an OOPs occurs due to bad memory accesses, instead of just
47339 + terminating the offending process (and potentially allowing
47340 + a subsequent exploit from the same user), we will take one of two
47341 + actions:
47342 + If the user was root, we will panic the system
47343 + If the user was non-root, we will log the attempt, terminate
47344 + all processes owned by the user, then prevent them from creating
47345 + any new processes until the system is restarted
47346 + This deters repeated kernel exploitation/bruteforcing attempts
47347 + and is useful for later forensics.
47348 +
47349 +endmenu
47350 +menu "Role Based Access Control Options"
47351 +depends on GRKERNSEC
47352 +
47353 +config GRKERNSEC_RBAC_DEBUG
47354 + bool
47355 +
47356 +config GRKERNSEC_NO_RBAC
47357 + bool "Disable RBAC system"
47358 + help
47359 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47360 + preventing the RBAC system from being enabled. You should only say Y
47361 + here if you have no intention of using the RBAC system, so as to prevent
47362 + an attacker with root access from misusing the RBAC system to hide files
47363 + and processes when loadable module support and /dev/[k]mem have been
47364 + locked down.
47365 +
47366 +config GRKERNSEC_ACL_HIDEKERN
47367 + bool "Hide kernel processes"
47368 + help
47369 + If you say Y here, all kernel threads will be hidden to all
47370 + processes but those whose subject has the "view hidden processes"
47371 + flag.
47372 +
47373 +config GRKERNSEC_ACL_MAXTRIES
47374 + int "Maximum tries before password lockout"
47375 + default 3
47376 + help
47377 + This option enforces the maximum number of times a user can attempt
47378 + to authorize themselves with the grsecurity RBAC system before being
47379 + denied the ability to attempt authorization again for a specified time.
47380 + The lower the number, the harder it will be to brute-force a password.
47381 +
47382 +config GRKERNSEC_ACL_TIMEOUT
47383 + int "Time to wait after max password tries, in seconds"
47384 + default 30
47385 + help
47386 + This option specifies the time the user must wait after attempting to
47387 + authorize to the RBAC system with the maximum number of invalid
47388 + passwords. The higher the number, the harder it will be to brute-force
47389 + a password.
47390 +
47391 +endmenu
47392 +menu "Filesystem Protections"
47393 +depends on GRKERNSEC
47394 +
47395 +config GRKERNSEC_PROC
47396 + bool "Proc restrictions"
47397 + help
47398 + If you say Y here, the permissions of the /proc filesystem
47399 + will be altered to enhance system security and privacy. You MUST
47400 + choose either a user only restriction or a user and group restriction.
47401 + Depending upon the option you choose, you can either restrict users to
47402 + see only the processes they themselves run, or choose a group that can
47403 + view all processes and files normally restricted to root if you choose
47404 + the "restrict to user only" option. NOTE: If you're running identd as
47405 + a non-root user, you will have to run it as the group you specify here.
47406 +
47407 +config GRKERNSEC_PROC_USER
47408 + bool "Restrict /proc to user only"
47409 + depends on GRKERNSEC_PROC
47410 + help
47411 + If you say Y here, non-root users will only be able to view their own
47412 + processes, and restricts them from viewing network-related information,
47413 + and viewing kernel symbol and module information.
47414 +
47415 +config GRKERNSEC_PROC_USERGROUP
47416 + bool "Allow special group"
47417 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47418 + help
47419 + If you say Y here, you will be able to select a group that will be
47420 + able to view all processes and network-related information. If you've
47421 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47422 + remain hidden. This option is useful if you want to run identd as
47423 + a non-root user.
47424 +
47425 +config GRKERNSEC_PROC_GID
47426 + int "GID for special group"
47427 + depends on GRKERNSEC_PROC_USERGROUP
47428 + default 1001
47429 +
47430 +config GRKERNSEC_PROC_ADD
47431 + bool "Additional restrictions"
47432 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47433 + help
47434 + If you say Y here, additional restrictions will be placed on
47435 + /proc that keep normal users from viewing device information and
47436 + slabinfo information that could be useful for exploits.
47437 +
47438 +config GRKERNSEC_LINK
47439 + bool "Linking restrictions"
47440 + help
47441 + If you say Y here, /tmp race exploits will be prevented, since users
47442 + will no longer be able to follow symlinks owned by other users in
47443 + world-writable +t directories (e.g. /tmp), unless the owner of the
47444 + symlink is the owner of the directory. users will also not be
47445 + able to hardlink to files they do not own. If the sysctl option is
47446 + enabled, a sysctl option with name "linking_restrictions" is created.
47447 +
47448 +config GRKERNSEC_FIFO
47449 + bool "FIFO restrictions"
47450 + help
47451 + If you say Y here, users will not be able to write to FIFOs they don't
47452 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47453 + the FIFO is the same owner of the directory it's held in. If the sysctl
47454 + option is enabled, a sysctl option with name "fifo_restrictions" is
47455 + created.
47456 +
47457 +config GRKERNSEC_SYSFS_RESTRICT
47458 + bool "Sysfs/debugfs restriction"
47459 + depends on SYSFS
47460 + help
47461 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47462 + any filesystem normally mounted under it (e.g. debugfs) will be
47463 + mostly accessible only by root. These filesystems generally provide access
47464 + to hardware and debug information that isn't appropriate for unprivileged
47465 + users of the system. Sysfs and debugfs have also become a large source
47466 + of new vulnerabilities, ranging from infoleaks to local compromise.
47467 + There has been very little oversight with an eye toward security involved
47468 + in adding new exporters of information to these filesystems, so their
47469 + use is discouraged.
47470 + For reasons of compatibility, a few directories have been whitelisted
47471 + for access by non-root users:
47472 + /sys/fs/selinux
47473 + /sys/fs/fuse
47474 + /sys/devices/system/cpu
47475 +
47476 +config GRKERNSEC_ROFS
47477 + bool "Runtime read-only mount protection"
47478 + help
47479 + If you say Y here, a sysctl option with name "romount_protect" will
47480 + be created. By setting this option to 1 at runtime, filesystems
47481 + will be protected in the following ways:
47482 + * No new writable mounts will be allowed
47483 + * Existing read-only mounts won't be able to be remounted read/write
47484 + * Write operations will be denied on all block devices
47485 + This option acts independently of grsec_lock: once it is set to 1,
47486 + it cannot be turned off. Therefore, please be mindful of the resulting
47487 + behavior if this option is enabled in an init script on a read-only
47488 + filesystem. This feature is mainly intended for secure embedded systems.
47489 +
47490 +config GRKERNSEC_CHROOT
47491 + bool "Chroot jail restrictions"
47492 + help
47493 + If you say Y here, you will be able to choose several options that will
47494 + make breaking out of a chrooted jail much more difficult. If you
47495 + encounter no software incompatibilities with the following options, it
47496 + is recommended that you enable each one.
47497 +
47498 +config GRKERNSEC_CHROOT_MOUNT
47499 + bool "Deny mounts"
47500 + depends on GRKERNSEC_CHROOT
47501 + help
47502 + If you say Y here, processes inside a chroot will not be able to
47503 + mount or remount filesystems. If the sysctl option is enabled, a
47504 + sysctl option with name "chroot_deny_mount" is created.
47505 +
47506 +config GRKERNSEC_CHROOT_DOUBLE
47507 + bool "Deny double-chroots"
47508 + depends on GRKERNSEC_CHROOT
47509 + help
47510 + If you say Y here, processes inside a chroot will not be able to chroot
47511 + again outside the chroot. This is a widely used method of breaking
47512 + out of a chroot jail and should not be allowed. If the sysctl
47513 + option is enabled, a sysctl option with name
47514 + "chroot_deny_chroot" is created.
47515 +
47516 +config GRKERNSEC_CHROOT_PIVOT
47517 + bool "Deny pivot_root in chroot"
47518 + depends on GRKERNSEC_CHROOT
47519 + help
47520 + If you say Y here, processes inside a chroot will not be able to use
47521 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47522 + works similar to chroot in that it changes the root filesystem. This
47523 + function could be misused in a chrooted process to attempt to break out
47524 + of the chroot, and therefore should not be allowed. If the sysctl
47525 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47526 + created.
47527 +
47528 +config GRKERNSEC_CHROOT_CHDIR
47529 + bool "Enforce chdir(\"/\") on all chroots"
47530 + depends on GRKERNSEC_CHROOT
47531 + help
47532 + If you say Y here, the current working directory of all newly-chrooted
47533 + applications will be set to the root directory of the chroot.
47534 + The man page on chroot(2) states:
47535 + Note that this call does not change the current working
47536 + directory, so that `.' can be outside the tree rooted at
47537 + `/'. In particular, the super-user can escape from a
47538 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47539 +
47540 + It is recommended that you say Y here, since it's not known to break
47541 + any software. If the sysctl option is enabled, a sysctl option with
47542 + name "chroot_enforce_chdir" is created.
47543 +
47544 +config GRKERNSEC_CHROOT_CHMOD
47545 + bool "Deny (f)chmod +s"
47546 + depends on GRKERNSEC_CHROOT
47547 + help
47548 + If you say Y here, processes inside a chroot will not be able to chmod
47549 + or fchmod files to make them have suid or sgid bits. This protects
47550 + against another published method of breaking a chroot. If the sysctl
47551 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47552 + created.
47553 +
47554 +config GRKERNSEC_CHROOT_FCHDIR
47555 + bool "Deny fchdir out of chroot"
47556 + depends on GRKERNSEC_CHROOT
47557 + help
47558 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47559 + to a file descriptor of the chrooting process that points to a directory
47560 + outside the filesystem will be stopped. If the sysctl option
47561 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47562 +
47563 +config GRKERNSEC_CHROOT_MKNOD
47564 + bool "Deny mknod"
47565 + depends on GRKERNSEC_CHROOT
47566 + help
47567 + If you say Y here, processes inside a chroot will not be allowed to
47568 + mknod. The problem with using mknod inside a chroot is that it
47569 + would allow an attacker to create a device entry that is the same
47570 + as one on the physical root of your system, which could range from
47571 + anything from the console device to a device for your harddrive (which
47572 + they could then use to wipe the drive or steal data). It is recommended
47573 + that you say Y here, unless you run into software incompatibilities.
47574 + If the sysctl option is enabled, a sysctl option with name
47575 + "chroot_deny_mknod" is created.
47576 +
47577 +config GRKERNSEC_CHROOT_SHMAT
47578 + bool "Deny shmat() out of chroot"
47579 + depends on GRKERNSEC_CHROOT
47580 + help
47581 + If you say Y here, processes inside a chroot will not be able to attach
47582 + to shared memory segments that were created outside of the chroot jail.
47583 + It is recommended that you say Y here. If the sysctl option is enabled,
47584 + a sysctl option with name "chroot_deny_shmat" is created.
47585 +
47586 +config GRKERNSEC_CHROOT_UNIX
47587 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47588 + depends on GRKERNSEC_CHROOT
47589 + help
47590 + If you say Y here, processes inside a chroot will not be able to
47591 + connect to abstract (meaning not belonging to a filesystem) Unix
47592 + domain sockets that were bound outside of a chroot. It is recommended
47593 + that you say Y here. If the sysctl option is enabled, a sysctl option
47594 + with name "chroot_deny_unix" is created.
47595 +
47596 +config GRKERNSEC_CHROOT_FINDTASK
47597 + bool "Protect outside processes"
47598 + depends on GRKERNSEC_CHROOT
47599 + help
47600 + If you say Y here, processes inside a chroot will not be able to
47601 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47602 + getsid, or view any process outside of the chroot. If the sysctl
47603 + option is enabled, a sysctl option with name "chroot_findtask" is
47604 + created.
47605 +
47606 +config GRKERNSEC_CHROOT_NICE
47607 + bool "Restrict priority changes"
47608 + depends on GRKERNSEC_CHROOT
47609 + help
47610 + If you say Y here, processes inside a chroot will not be able to raise
47611 + the priority of processes in the chroot, or alter the priority of
47612 + processes outside the chroot. This provides more security than simply
47613 + removing CAP_SYS_NICE from the process' capability set. If the
47614 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47615 + is created.
47616 +
47617 +config GRKERNSEC_CHROOT_SYSCTL
47618 + bool "Deny sysctl writes"
47619 + depends on GRKERNSEC_CHROOT
47620 + help
47621 + If you say Y here, an attacker in a chroot will not be able to
47622 + write to sysctl entries, either by sysctl(2) or through a /proc
47623 + interface. It is strongly recommended that you say Y here. If the
47624 + sysctl option is enabled, a sysctl option with name
47625 + "chroot_deny_sysctl" is created.
47626 +
47627 +config GRKERNSEC_CHROOT_CAPS
47628 + bool "Capability restrictions"
47629 + depends on GRKERNSEC_CHROOT
47630 + help
47631 + If you say Y here, the capabilities on all processes within a
47632 + chroot jail will be lowered to stop module insertion, raw i/o,
47633 + system and net admin tasks, rebooting the system, modifying immutable
47634 + files, modifying IPC owned by another, and changing the system time.
47635 + This is left an option because it can break some apps. Disable this
47636 + if your chrooted apps are having problems performing those kinds of
47637 + tasks. If the sysctl option is enabled, a sysctl option with
47638 + name "chroot_caps" is created.
47639 +
47640 +endmenu
47641 +menu "Kernel Auditing"
47642 +depends on GRKERNSEC
47643 +
47644 +config GRKERNSEC_AUDIT_GROUP
47645 + bool "Single group for auditing"
47646 + help
47647 + If you say Y here, the exec, chdir, and (un)mount logging features
47648 + will only operate on a group you specify. This option is recommended
47649 + if you only want to watch certain users instead of having a large
47650 + amount of logs from the entire system. If the sysctl option is enabled,
47651 + a sysctl option with name "audit_group" is created.
47652 +
47653 +config GRKERNSEC_AUDIT_GID
47654 + int "GID for auditing"
47655 + depends on GRKERNSEC_AUDIT_GROUP
47656 + default 1007
47657 +
47658 +config GRKERNSEC_EXECLOG
47659 + bool "Exec logging"
47660 + help
47661 + If you say Y here, all execve() calls will be logged (since the
47662 + other exec*() calls are frontends to execve(), all execution
47663 + will be logged). Useful for shell-servers that like to keep track
47664 + of their users. If the sysctl option is enabled, a sysctl option with
47665 + name "exec_logging" is created.
47666 + WARNING: This option when enabled will produce a LOT of logs, especially
47667 + on an active system.
47668 +
47669 +config GRKERNSEC_RESLOG
47670 + bool "Resource logging"
47671 + help
47672 + If you say Y here, all attempts to overstep resource limits will
47673 + be logged with the resource name, the requested size, and the current
47674 + limit. It is highly recommended that you say Y here. If the sysctl
47675 + option is enabled, a sysctl option with name "resource_logging" is
47676 + created. If the RBAC system is enabled, the sysctl value is ignored.
47677 +
47678 +config GRKERNSEC_CHROOT_EXECLOG
47679 + bool "Log execs within chroot"
47680 + help
47681 + If you say Y here, all executions inside a chroot jail will be logged
47682 + to syslog. This can cause a large amount of logs if certain
47683 + applications (eg. djb's daemontools) are installed on the system, and
47684 + is therefore left as an option. If the sysctl option is enabled, a
47685 + sysctl option with name "chroot_execlog" is created.
47686 +
47687 +config GRKERNSEC_AUDIT_PTRACE
47688 + bool "Ptrace logging"
47689 + help
47690 + If you say Y here, all attempts to attach to a process via ptrace
47691 + will be logged. If the sysctl option is enabled, a sysctl option
47692 + with name "audit_ptrace" is created.
47693 +
47694 +config GRKERNSEC_AUDIT_CHDIR
47695 + bool "Chdir logging"
47696 + help
47697 + If you say Y here, all chdir() calls will be logged. If the sysctl
47698 + option is enabled, a sysctl option with name "audit_chdir" is created.
47699 +
47700 +config GRKERNSEC_AUDIT_MOUNT
47701 + bool "(Un)Mount logging"
47702 + help
47703 + If you say Y here, all mounts and unmounts will be logged. If the
47704 + sysctl option is enabled, a sysctl option with name "audit_mount" is
47705 + created.
47706 +
47707 +config GRKERNSEC_SIGNAL
47708 + bool "Signal logging"
47709 + help
47710 + If you say Y here, certain important signals will be logged, such as
47711 + SIGSEGV, which will as a result inform you of when an error in a program
47712 + occurred, which in some cases could mean a possible exploit attempt.
47713 + If the sysctl option is enabled, a sysctl option with name
47714 + "signal_logging" is created.
47715 +
47716 +config GRKERNSEC_FORKFAIL
47717 + bool "Fork failure logging"
47718 + help
47719 + If you say Y here, all failed fork() attempts will be logged.
47720 + This could suggest a fork bomb, or someone attempting to overstep
47721 + their process limit. If the sysctl option is enabled, a sysctl option
47722 + with name "forkfail_logging" is created.
47723 +
47724 +config GRKERNSEC_TIME
47725 + bool "Time change logging"
47726 + help
47727 + If you say Y here, any changes of the system clock will be logged.
47728 + If the sysctl option is enabled, a sysctl option with name
47729 + "timechange_logging" is created.
47730 +
47731 +config GRKERNSEC_PROC_IPADDR
47732 + bool "/proc/<pid>/ipaddr support"
47733 + help
47734 + If you say Y here, a new entry will be added to each /proc/<pid>
47735 + directory that contains the IP address of the person using the task.
47736 + The IP is carried across local TCP and AF_UNIX stream sockets.
47737 + This information can be useful for IDS/IPSes to perform remote response
47738 + to a local attack. The entry is readable by only the owner of the
47739 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47740 + the RBAC system), and thus does not create privacy concerns.
47741 +
47742 +config GRKERNSEC_RWXMAP_LOG
47743 + bool 'Denied RWX mmap/mprotect logging'
47744 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47745 + help
47746 + If you say Y here, calls to mmap() and mprotect() with explicit
47747 + usage of PROT_WRITE and PROT_EXEC together will be logged when
47748 + denied by the PAX_MPROTECT feature. If the sysctl option is
47749 + enabled, a sysctl option with name "rwxmap_logging" is created.
47750 +
47751 +config GRKERNSEC_AUDIT_TEXTREL
47752 + bool 'ELF text relocations logging (READ HELP)'
47753 + depends on PAX_MPROTECT
47754 + help
47755 + If you say Y here, text relocations will be logged with the filename
47756 + of the offending library or binary. The purpose of the feature is
47757 + to help Linux distribution developers get rid of libraries and
47758 + binaries that need text relocations which hinder the future progress
47759 + of PaX. Only Linux distribution developers should say Y here, and
47760 + never on a production machine, as this option creates an information
47761 + leak that could aid an attacker in defeating the randomization of
47762 + a single memory region. If the sysctl option is enabled, a sysctl
47763 + option with name "audit_textrel" is created.
47764 +
47765 +endmenu
47766 +
47767 +menu "Executable Protections"
47768 +depends on GRKERNSEC
47769 +
47770 +config GRKERNSEC_DMESG
47771 + bool "Dmesg(8) restriction"
47772 + help
47773 + If you say Y here, non-root users will not be able to use dmesg(8)
47774 + to view up to the last 4kb of messages in the kernel's log buffer.
47775 + The kernel's log buffer often contains kernel addresses and other
47776 + identifying information useful to an attacker in fingerprinting a
47777 + system for a targeted exploit.
47778 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
47779 + created.
47780 +
47781 +config GRKERNSEC_HARDEN_PTRACE
47782 + bool "Deter ptrace-based process snooping"
47783 + help
47784 + If you say Y here, TTY sniffers and other malicious monitoring
47785 + programs implemented through ptrace will be defeated. If you
47786 + have been using the RBAC system, this option has already been
47787 + enabled for several years for all users, with the ability to make
47788 + fine-grained exceptions.
47789 +
47790 + This option only affects the ability of non-root users to ptrace
47791 + processes that are not a descendent of the ptracing process.
47792 + This means that strace ./binary and gdb ./binary will still work,
47793 + but attaching to arbitrary processes will not. If the sysctl
47794 + option is enabled, a sysctl option with name "harden_ptrace" is
47795 + created.
47796 +
47797 +config GRKERNSEC_PTRACE_READEXEC
47798 + bool "Require read access to ptrace sensitive binaries"
47799 + help
47800 + If you say Y here, unprivileged users will not be able to ptrace unreadable
47801 + binaries. This option is useful in environments that
47802 + remove the read bits (e.g. file mode 4711) from suid binaries to
47803 + prevent infoleaking of their contents. This option adds
47804 + consistency to the use of that file mode, as the binary could normally
47805 + be read out when run without privileges while ptracing.
47806 +
47807 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47808 + is created.
47809 +
47810 +config GRKERNSEC_SETXID
47811 + bool "Enforce consistent multithreaded privileges"
47812 + help
47813 + If you say Y here, a change from a root uid to a non-root uid
47814 + in a multithreaded application will cause the resulting uids,
47815 + gids, supplementary groups, and capabilities in that thread
47816 + to be propagated to the other threads of the process. In most
47817 + cases this is unnecessary, as glibc will emulate this behavior
47818 + on behalf of the application. Other libcs do not act in the
47819 + same way, allowing the other threads of the process to continue
47820 + running with root privileges. If the sysctl option is enabled,
47821 + a sysctl option with name "consistent_setxid" is created.
47822 +
47823 +config GRKERNSEC_TPE
47824 + bool "Trusted Path Execution (TPE)"
47825 + help
47826 + If you say Y here, you will be able to choose a gid to add to the
47827 + supplementary groups of users you want to mark as "untrusted."
47828 + These users will not be able to execute any files that are not in
47829 + root-owned directories writable only by root. If the sysctl option
47830 + is enabled, a sysctl option with name "tpe" is created.
47831 +
47832 +config GRKERNSEC_TPE_ALL
47833 + bool "Partially restrict all non-root users"
47834 + depends on GRKERNSEC_TPE
47835 + help
47836 + If you say Y here, all non-root users will be covered under
47837 + a weaker TPE restriction. This is separate from, and in addition to,
47838 + the main TPE options that you have selected elsewhere. Thus, if a
47839 + "trusted" GID is chosen, this restriction applies to even that GID.
47840 + Under this restriction, all non-root users will only be allowed to
47841 + execute files in directories they own that are not group or
47842 + world-writable, or in directories owned by root and writable only by
47843 + root. If the sysctl option is enabled, a sysctl option with name
47844 + "tpe_restrict_all" is created.
47845 +
47846 +config GRKERNSEC_TPE_INVERT
47847 + bool "Invert GID option"
47848 + depends on GRKERNSEC_TPE
47849 + help
47850 + If you say Y here, the group you specify in the TPE configuration will
47851 + decide what group TPE restrictions will be *disabled* for. This
47852 + option is useful if you want TPE restrictions to be applied to most
47853 + users on the system. If the sysctl option is enabled, a sysctl option
47854 + with name "tpe_invert" is created. Unlike other sysctl options, this
47855 + entry will default to on for backward-compatibility.
47856 +
47857 +config GRKERNSEC_TPE_GID
47858 + int "GID for untrusted users"
47859 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
47860 + default 1005
47861 + help
47862 + Setting this GID determines what group TPE restrictions will be
47863 + *enabled* for. If the sysctl option is enabled, a sysctl option
47864 + with name "tpe_gid" is created.
47865 +
47866 +config GRKERNSEC_TPE_GID
47867 + int "GID for trusted users"
47868 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
47869 + default 1005
47870 + help
47871 + Setting this GID determines what group TPE restrictions will be
47872 + *disabled* for. If the sysctl option is enabled, a sysctl option
47873 + with name "tpe_gid" is created.
47874 +
47875 +endmenu
47876 +menu "Network Protections"
47877 +depends on GRKERNSEC
47878 +
47879 +config GRKERNSEC_RANDNET
47880 + bool "Larger entropy pools"
47881 + help
47882 + If you say Y here, the entropy pools used for many features of Linux
47883 + and grsecurity will be doubled in size. Since several grsecurity
47884 + features use additional randomness, it is recommended that you say Y
47885 + here. Saying Y here has a similar effect as modifying
47886 + /proc/sys/kernel/random/poolsize.
47887 +
47888 +config GRKERNSEC_BLACKHOLE
47889 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
47890 + depends on NET
47891 + help
47892 + If you say Y here, neither TCP resets nor ICMP
47893 + destination-unreachable packets will be sent in response to packets
47894 + sent to ports for which no associated listening process exists.
47895 + This feature supports both IPV4 and IPV6 and exempts the
47896 + loopback interface from blackholing. Enabling this feature
47897 + makes a host more resilient to DoS attacks and reduces network
47898 + visibility against scanners.
47899 +
47900 + The blackhole feature as-implemented is equivalent to the FreeBSD
47901 + blackhole feature, as it prevents RST responses to all packets, not
47902 + just SYNs. Under most application behavior this causes no
47903 + problems, but applications (like haproxy) may not close certain
47904 + connections in a way that cleanly terminates them on the remote
47905 + end, leaving the remote host in LAST_ACK state. Because of this
47906 + side-effect and to prevent intentional LAST_ACK DoSes, this
47907 + feature also adds automatic mitigation against such attacks.
47908 + The mitigation drastically reduces the amount of time a socket
47909 + can spend in LAST_ACK state. If you're using haproxy and not
47910 + all servers it connects to have this option enabled, consider
47911 + disabling this feature on the haproxy host.
47912 +
47913 + If the sysctl option is enabled, two sysctl options with names
47914 + "ip_blackhole" and "lastack_retries" will be created.
47915 + While "ip_blackhole" takes the standard zero/non-zero on/off
47916 + toggle, "lastack_retries" uses the same kinds of values as
47917 + "tcp_retries1" and "tcp_retries2". The default value of 4
47918 + prevents a socket from lasting more than 45 seconds in LAST_ACK
47919 + state.
47920 +
47921 +config GRKERNSEC_SOCKET
47922 + bool "Socket restrictions"
47923 + depends on NET
47924 + help
47925 + If you say Y here, you will be able to choose from several options.
47926 + If you assign a GID on your system and add it to the supplementary
47927 + groups of users you want to restrict socket access to, this patch
47928 + will perform up to three things, based on the option(s) you choose.
47929 +
47930 +config GRKERNSEC_SOCKET_ALL
47931 + bool "Deny any sockets to group"
47932 + depends on GRKERNSEC_SOCKET
47933 + help
47934 + If you say Y here, you will be able to choose a GID of whose users will
47935 + be unable to connect to other hosts from your machine or run server
47936 + applications from your machine. If the sysctl option is enabled, a
47937 + sysctl option with name "socket_all" is created.
47938 +
47939 +config GRKERNSEC_SOCKET_ALL_GID
47940 + int "GID to deny all sockets for"
47941 + depends on GRKERNSEC_SOCKET_ALL
47942 + default 1004
47943 + help
47944 + Here you can choose the GID to disable socket access for. Remember to
47945 + add the users you want socket access disabled for to the GID
47946 + specified here. If the sysctl option is enabled, a sysctl option
47947 + with name "socket_all_gid" is created.
47948 +
47949 +config GRKERNSEC_SOCKET_CLIENT
47950 + bool "Deny client sockets to group"
47951 + depends on GRKERNSEC_SOCKET
47952 + help
47953 + If you say Y here, you will be able to choose a GID of whose users will
47954 + be unable to connect to other hosts from your machine, but will be
47955 + able to run servers. If this option is enabled, all users in the group
47956 + you specify will have to use passive mode when initiating ftp transfers
47957 + from the shell on your machine. If the sysctl option is enabled, a
47958 + sysctl option with name "socket_client" is created.
47959 +
47960 +config GRKERNSEC_SOCKET_CLIENT_GID
47961 + int "GID to deny client sockets for"
47962 + depends on GRKERNSEC_SOCKET_CLIENT
47963 + default 1003
47964 + help
47965 + Here you can choose the GID to disable client socket access for.
47966 + Remember to add the users you want client socket access disabled for to
47967 + the GID specified here. If the sysctl option is enabled, a sysctl
47968 + option with name "socket_client_gid" is created.
47969 +
47970 +config GRKERNSEC_SOCKET_SERVER
47971 + bool "Deny server sockets to group"
47972 + depends on GRKERNSEC_SOCKET
47973 + help
47974 + If you say Y here, you will be able to choose a GID of whose users will
47975 + be unable to run server applications from your machine. If the sysctl
47976 + option is enabled, a sysctl option with name "socket_server" is created.
47977 +
47978 +config GRKERNSEC_SOCKET_SERVER_GID
47979 + int "GID to deny server sockets for"
47980 + depends on GRKERNSEC_SOCKET_SERVER
47981 + default 1002
47982 + help
47983 + Here you can choose the GID to disable server socket access for.
47984 + Remember to add the users you want server socket access disabled for to
47985 + the GID specified here. If the sysctl option is enabled, a sysctl
47986 + option with name "socket_server_gid" is created.
47987 +
47988 +endmenu
47989 +menu "Sysctl support"
47990 +depends on GRKERNSEC && SYSCTL
47991 +
47992 +config GRKERNSEC_SYSCTL
47993 + bool "Sysctl support"
47994 + help
47995 + If you say Y here, you will be able to change the options that
47996 + grsecurity runs with at bootup, without having to recompile your
47997 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
47998 + to enable (1) or disable (0) various features. All the sysctl entries
47999 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48000 + All features enabled in the kernel configuration are disabled at boot
48001 + if you do not say Y to the "Turn on features by default" option.
48002 + All options should be set at startup, and the grsec_lock entry should
48003 + be set to a non-zero value after all the options are set.
48004 + *THIS IS EXTREMELY IMPORTANT*
48005 +
48006 +config GRKERNSEC_SYSCTL_DISTRO
48007 + bool "Extra sysctl support for distro makers (READ HELP)"
48008 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48009 + help
48010 + If you say Y here, additional sysctl options will be created
48011 + for features that affect processes running as root. Therefore,
48012 + it is critical when using this option that the grsec_lock entry be
48013 + enabled after boot. Only distros with prebuilt kernel packages
48014 + with this option enabled that can ensure grsec_lock is enabled
48015 + after boot should use this option.
48016 + *Failure to set grsec_lock after boot makes all grsec features
48017 + this option covers useless*
48018 +
48019 + Currently this option creates the following sysctl entries:
48020 + "Disable Privileged I/O": "disable_priv_io"
48021 +
48022 +config GRKERNSEC_SYSCTL_ON
48023 + bool "Turn on features by default"
48024 + depends on GRKERNSEC_SYSCTL
48025 + help
48026 + If you say Y here, instead of having all features enabled in the
48027 + kernel configuration disabled at boot time, the features will be
48028 + enabled at boot time. It is recommended you say Y here unless
48029 + there is some reason you would want all sysctl-tunable features to
48030 + be disabled by default. As mentioned elsewhere, it is important
48031 + to enable the grsec_lock entry once you have finished modifying
48032 + the sysctl entries.
48033 +
48034 +endmenu
48035 +menu "Logging Options"
48036 +depends on GRKERNSEC
48037 +
48038 +config GRKERNSEC_FLOODTIME
48039 + int "Seconds in between log messages (minimum)"
48040 + default 10
48041 + help
48042 + This option allows you to enforce the number of seconds between
48043 + grsecurity log messages. The default should be suitable for most
48044 + people, however, if you choose to change it, choose a value small enough
48045 + to allow informative logs to be produced, but large enough to
48046 + prevent flooding.
48047 +
48048 +config GRKERNSEC_FLOODBURST
48049 + int "Number of messages in a burst (maximum)"
48050 + default 6
48051 + help
48052 + This option allows you to choose the maximum number of messages allowed
48053 + within the flood time interval you chose in a separate option. The
48054 + default should be suitable for most people, however if you find that
48055 + many of your logs are being interpreted as flooding, you may want to
48056 + raise this value.
48057 +
48058 +endmenu
48059 +
48060 +endmenu
48061 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48062 new file mode 100644
48063 index 0000000..be9ae3a
48064 --- /dev/null
48065 +++ b/grsecurity/Makefile
48066 @@ -0,0 +1,36 @@
48067 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48068 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48069 +# into an RBAC system
48070 +#
48071 +# All code in this directory and various hooks inserted throughout the kernel
48072 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48073 +# under the GPL v2 or higher
48074 +
48075 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48076 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48077 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48078 +
48079 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48080 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48081 + gracl_learn.o grsec_log.o
48082 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48083 +
48084 +ifdef CONFIG_NET
48085 +obj-y += grsec_sock.o
48086 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48087 +endif
48088 +
48089 +ifndef CONFIG_GRKERNSEC
48090 +obj-y += grsec_disabled.o
48091 +endif
48092 +
48093 +ifdef CONFIG_GRKERNSEC_HIDESYM
48094 +extra-y := grsec_hidesym.o
48095 +$(obj)/grsec_hidesym.o:
48096 + @-chmod -f 500 /boot
48097 + @-chmod -f 500 /lib/modules
48098 + @-chmod -f 500 /lib64/modules
48099 + @-chmod -f 500 /lib32/modules
48100 + @-chmod -f 700 .
48101 + @echo ' grsec: protected kernel image paths'
48102 +endif
48103 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48104 new file mode 100644
48105 index 0000000..d3b423d
48106 --- /dev/null
48107 +++ b/grsecurity/gracl.c
48108 @@ -0,0 +1,4155 @@
48109 +#include <linux/kernel.h>
48110 +#include <linux/module.h>
48111 +#include <linux/sched.h>
48112 +#include <linux/mm.h>
48113 +#include <linux/file.h>
48114 +#include <linux/fs.h>
48115 +#include <linux/namei.h>
48116 +#include <linux/mount.h>
48117 +#include <linux/tty.h>
48118 +#include <linux/proc_fs.h>
48119 +#include <linux/lglock.h>
48120 +#include <linux/slab.h>
48121 +#include <linux/vmalloc.h>
48122 +#include <linux/types.h>
48123 +#include <linux/sysctl.h>
48124 +#include <linux/netdevice.h>
48125 +#include <linux/ptrace.h>
48126 +#include <linux/gracl.h>
48127 +#include <linux/gralloc.h>
48128 +#include <linux/security.h>
48129 +#include <linux/grinternal.h>
48130 +#include <linux/pid_namespace.h>
48131 +#include <linux/fdtable.h>
48132 +#include <linux/percpu.h>
48133 +
48134 +#include <asm/uaccess.h>
48135 +#include <asm/errno.h>
48136 +#include <asm/mman.h>
48137 +
48138 +static struct acl_role_db acl_role_set;
48139 +static struct name_db name_set;
48140 +static struct inodev_db inodev_set;
48141 +
48142 +/* for keeping track of userspace pointers used for subjects, so we
48143 + can share references in the kernel as well
48144 +*/
48145 +
48146 +static struct path real_root;
48147 +
48148 +static struct acl_subj_map_db subj_map_set;
48149 +
48150 +static struct acl_role_label *default_role;
48151 +
48152 +static struct acl_role_label *role_list;
48153 +
48154 +static u16 acl_sp_role_value;
48155 +
48156 +extern char *gr_shared_page[4];
48157 +static DEFINE_MUTEX(gr_dev_mutex);
48158 +DEFINE_RWLOCK(gr_inode_lock);
48159 +
48160 +struct gr_arg *gr_usermode;
48161 +
48162 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48163 +
48164 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48165 +extern void gr_clear_learn_entries(void);
48166 +
48167 +#ifdef CONFIG_GRKERNSEC_RESLOG
48168 +extern void gr_log_resource(const struct task_struct *task,
48169 + const int res, const unsigned long wanted, const int gt);
48170 +#endif
48171 +
48172 +unsigned char *gr_system_salt;
48173 +unsigned char *gr_system_sum;
48174 +
48175 +static struct sprole_pw **acl_special_roles = NULL;
48176 +static __u16 num_sprole_pws = 0;
48177 +
48178 +static struct acl_role_label *kernel_role = NULL;
48179 +
48180 +static unsigned int gr_auth_attempts = 0;
48181 +static unsigned long gr_auth_expires = 0UL;
48182 +
48183 +#ifdef CONFIG_NET
48184 +extern struct vfsmount *sock_mnt;
48185 +#endif
48186 +
48187 +extern struct vfsmount *pipe_mnt;
48188 +extern struct vfsmount *shm_mnt;
48189 +#ifdef CONFIG_HUGETLBFS
48190 +extern struct vfsmount *hugetlbfs_vfsmount;
48191 +#endif
48192 +
48193 +static struct acl_object_label *fakefs_obj_rw;
48194 +static struct acl_object_label *fakefs_obj_rwx;
48195 +
48196 +extern int gr_init_uidset(void);
48197 +extern void gr_free_uidset(void);
48198 +extern void gr_remove_uid(uid_t uid);
48199 +extern int gr_find_uid(uid_t uid);
48200 +
48201 +DECLARE_BRLOCK(vfsmount_lock);
48202 +
48203 +__inline__ int
48204 +gr_acl_is_enabled(void)
48205 +{
48206 + return (gr_status & GR_READY);
48207 +}
48208 +
48209 +#ifdef CONFIG_BTRFS_FS
48210 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48211 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48212 +#endif
48213 +
48214 +static inline dev_t __get_dev(const struct dentry *dentry)
48215 +{
48216 +#ifdef CONFIG_BTRFS_FS
48217 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48218 + return get_btrfs_dev_from_inode(dentry->d_inode);
48219 + else
48220 +#endif
48221 + return dentry->d_inode->i_sb->s_dev;
48222 +}
48223 +
48224 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48225 +{
48226 + return __get_dev(dentry);
48227 +}
48228 +
48229 +static char gr_task_roletype_to_char(struct task_struct *task)
48230 +{
48231 + switch (task->role->roletype &
48232 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48233 + GR_ROLE_SPECIAL)) {
48234 + case GR_ROLE_DEFAULT:
48235 + return 'D';
48236 + case GR_ROLE_USER:
48237 + return 'U';
48238 + case GR_ROLE_GROUP:
48239 + return 'G';
48240 + case GR_ROLE_SPECIAL:
48241 + return 'S';
48242 + }
48243 +
48244 + return 'X';
48245 +}
48246 +
48247 +char gr_roletype_to_char(void)
48248 +{
48249 + return gr_task_roletype_to_char(current);
48250 +}
48251 +
48252 +__inline__ int
48253 +gr_acl_tpe_check(void)
48254 +{
48255 + if (unlikely(!(gr_status & GR_READY)))
48256 + return 0;
48257 + if (current->role->roletype & GR_ROLE_TPE)
48258 + return 1;
48259 + else
48260 + return 0;
48261 +}
48262 +
48263 +int
48264 +gr_handle_rawio(const struct inode *inode)
48265 +{
48266 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48267 + if (inode && S_ISBLK(inode->i_mode) &&
48268 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48269 + !capable(CAP_SYS_RAWIO))
48270 + return 1;
48271 +#endif
48272 + return 0;
48273 +}
48274 +
48275 +static int
48276 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48277 +{
48278 + if (likely(lena != lenb))
48279 + return 0;
48280 +
48281 + return !memcmp(a, b, lena);
48282 +}
48283 +
48284 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48285 +{
48286 + *buflen -= namelen;
48287 + if (*buflen < 0)
48288 + return -ENAMETOOLONG;
48289 + *buffer -= namelen;
48290 + memcpy(*buffer, str, namelen);
48291 + return 0;
48292 +}
48293 +
48294 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48295 +{
48296 + return prepend(buffer, buflen, name->name, name->len);
48297 +}
48298 +
48299 +static int prepend_path(const struct path *path, struct path *root,
48300 + char **buffer, int *buflen)
48301 +{
48302 + struct dentry *dentry = path->dentry;
48303 + struct vfsmount *vfsmnt = path->mnt;
48304 + bool slash = false;
48305 + int error = 0;
48306 +
48307 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48308 + struct dentry * parent;
48309 +
48310 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48311 + /* Global root? */
48312 + if (vfsmnt->mnt_parent == vfsmnt) {
48313 + goto out;
48314 + }
48315 + dentry = vfsmnt->mnt_mountpoint;
48316 + vfsmnt = vfsmnt->mnt_parent;
48317 + continue;
48318 + }
48319 + parent = dentry->d_parent;
48320 + prefetch(parent);
48321 + spin_lock(&dentry->d_lock);
48322 + error = prepend_name(buffer, buflen, &dentry->d_name);
48323 + spin_unlock(&dentry->d_lock);
48324 + if (!error)
48325 + error = prepend(buffer, buflen, "/", 1);
48326 + if (error)
48327 + break;
48328 +
48329 + slash = true;
48330 + dentry = parent;
48331 + }
48332 +
48333 +out:
48334 + if (!error && !slash)
48335 + error = prepend(buffer, buflen, "/", 1);
48336 +
48337 + return error;
48338 +}
48339 +
48340 +/* this must be called with vfsmount_lock and rename_lock held */
48341 +
48342 +static char *__our_d_path(const struct path *path, struct path *root,
48343 + char *buf, int buflen)
48344 +{
48345 + char *res = buf + buflen;
48346 + int error;
48347 +
48348 + prepend(&res, &buflen, "\0", 1);
48349 + error = prepend_path(path, root, &res, &buflen);
48350 + if (error)
48351 + return ERR_PTR(error);
48352 +
48353 + return res;
48354 +}
48355 +
48356 +static char *
48357 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48358 +{
48359 + char *retval;
48360 +
48361 + retval = __our_d_path(path, root, buf, buflen);
48362 + if (unlikely(IS_ERR(retval)))
48363 + retval = strcpy(buf, "<path too long>");
48364 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48365 + retval[1] = '\0';
48366 +
48367 + return retval;
48368 +}
48369 +
48370 +static char *
48371 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48372 + char *buf, int buflen)
48373 +{
48374 + struct path path;
48375 + char *res;
48376 +
48377 + path.dentry = (struct dentry *)dentry;
48378 + path.mnt = (struct vfsmount *)vfsmnt;
48379 +
48380 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48381 + by the RBAC system */
48382 + res = gen_full_path(&path, &real_root, buf, buflen);
48383 +
48384 + return res;
48385 +}
48386 +
48387 +static char *
48388 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48389 + char *buf, int buflen)
48390 +{
48391 + char *res;
48392 + struct path path;
48393 + struct path root;
48394 + struct task_struct *reaper = &init_task;
48395 +
48396 + path.dentry = (struct dentry *)dentry;
48397 + path.mnt = (struct vfsmount *)vfsmnt;
48398 +
48399 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48400 + get_fs_root(reaper->fs, &root);
48401 +
48402 + write_seqlock(&rename_lock);
48403 + br_read_lock(vfsmount_lock);
48404 + res = gen_full_path(&path, &root, buf, buflen);
48405 + br_read_unlock(vfsmount_lock);
48406 + write_sequnlock(&rename_lock);
48407 +
48408 + path_put(&root);
48409 + return res;
48410 +}
48411 +
48412 +static char *
48413 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48414 +{
48415 + char *ret;
48416 + write_seqlock(&rename_lock);
48417 + br_read_lock(vfsmount_lock);
48418 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48419 + PAGE_SIZE);
48420 + br_read_unlock(vfsmount_lock);
48421 + write_sequnlock(&rename_lock);
48422 + return ret;
48423 +}
48424 +
48425 +static char *
48426 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48427 +{
48428 + char *ret;
48429 + char *buf;
48430 + int buflen;
48431 +
48432 + write_seqlock(&rename_lock);
48433 + br_read_lock(vfsmount_lock);
48434 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48435 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48436 + buflen = (int)(ret - buf);
48437 + if (buflen >= 5)
48438 + prepend(&ret, &buflen, "/proc", 5);
48439 + else
48440 + ret = strcpy(buf, "<path too long>");
48441 + br_read_unlock(vfsmount_lock);
48442 + write_sequnlock(&rename_lock);
48443 + return ret;
48444 +}
48445 +
48446 +char *
48447 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48448 +{
48449 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48450 + PAGE_SIZE);
48451 +}
48452 +
48453 +char *
48454 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48455 +{
48456 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48457 + PAGE_SIZE);
48458 +}
48459 +
48460 +char *
48461 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48462 +{
48463 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48464 + PAGE_SIZE);
48465 +}
48466 +
48467 +char *
48468 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48469 +{
48470 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48471 + PAGE_SIZE);
48472 +}
48473 +
48474 +char *
48475 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48476 +{
48477 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48478 + PAGE_SIZE);
48479 +}
48480 +
48481 +__inline__ __u32
48482 +to_gr_audit(const __u32 reqmode)
48483 +{
48484 + /* masks off auditable permission flags, then shifts them to create
48485 + auditing flags, and adds the special case of append auditing if
48486 + we're requesting write */
48487 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48488 +}
48489 +
48490 +struct acl_subject_label *
48491 +lookup_subject_map(const struct acl_subject_label *userp)
48492 +{
48493 + unsigned int index = shash(userp, subj_map_set.s_size);
48494 + struct subject_map *match;
48495 +
48496 + match = subj_map_set.s_hash[index];
48497 +
48498 + while (match && match->user != userp)
48499 + match = match->next;
48500 +
48501 + if (match != NULL)
48502 + return match->kernel;
48503 + else
48504 + return NULL;
48505 +}
48506 +
48507 +static void
48508 +insert_subj_map_entry(struct subject_map *subjmap)
48509 +{
48510 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48511 + struct subject_map **curr;
48512 +
48513 + subjmap->prev = NULL;
48514 +
48515 + curr = &subj_map_set.s_hash[index];
48516 + if (*curr != NULL)
48517 + (*curr)->prev = subjmap;
48518 +
48519 + subjmap->next = *curr;
48520 + *curr = subjmap;
48521 +
48522 + return;
48523 +}
48524 +
48525 +static struct acl_role_label *
48526 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48527 + const gid_t gid)
48528 +{
48529 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48530 + struct acl_role_label *match;
48531 + struct role_allowed_ip *ipp;
48532 + unsigned int x;
48533 + u32 curr_ip = task->signal->curr_ip;
48534 +
48535 + task->signal->saved_ip = curr_ip;
48536 +
48537 + match = acl_role_set.r_hash[index];
48538 +
48539 + while (match) {
48540 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48541 + for (x = 0; x < match->domain_child_num; x++) {
48542 + if (match->domain_children[x] == uid)
48543 + goto found;
48544 + }
48545 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48546 + break;
48547 + match = match->next;
48548 + }
48549 +found:
48550 + if (match == NULL) {
48551 + try_group:
48552 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48553 + match = acl_role_set.r_hash[index];
48554 +
48555 + while (match) {
48556 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48557 + for (x = 0; x < match->domain_child_num; x++) {
48558 + if (match->domain_children[x] == gid)
48559 + goto found2;
48560 + }
48561 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48562 + break;
48563 + match = match->next;
48564 + }
48565 +found2:
48566 + if (match == NULL)
48567 + match = default_role;
48568 + if (match->allowed_ips == NULL)
48569 + return match;
48570 + else {
48571 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48572 + if (likely
48573 + ((ntohl(curr_ip) & ipp->netmask) ==
48574 + (ntohl(ipp->addr) & ipp->netmask)))
48575 + return match;
48576 + }
48577 + match = default_role;
48578 + }
48579 + } else if (match->allowed_ips == NULL) {
48580 + return match;
48581 + } else {
48582 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48583 + if (likely
48584 + ((ntohl(curr_ip) & ipp->netmask) ==
48585 + (ntohl(ipp->addr) & ipp->netmask)))
48586 + return match;
48587 + }
48588 + goto try_group;
48589 + }
48590 +
48591 + return match;
48592 +}
48593 +
48594 +struct acl_subject_label *
48595 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48596 + const struct acl_role_label *role)
48597 +{
48598 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48599 + struct acl_subject_label *match;
48600 +
48601 + match = role->subj_hash[index];
48602 +
48603 + while (match && (match->inode != ino || match->device != dev ||
48604 + (match->mode & GR_DELETED))) {
48605 + match = match->next;
48606 + }
48607 +
48608 + if (match && !(match->mode & GR_DELETED))
48609 + return match;
48610 + else
48611 + return NULL;
48612 +}
48613 +
48614 +struct acl_subject_label *
48615 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48616 + const struct acl_role_label *role)
48617 +{
48618 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48619 + struct acl_subject_label *match;
48620 +
48621 + match = role->subj_hash[index];
48622 +
48623 + while (match && (match->inode != ino || match->device != dev ||
48624 + !(match->mode & GR_DELETED))) {
48625 + match = match->next;
48626 + }
48627 +
48628 + if (match && (match->mode & GR_DELETED))
48629 + return match;
48630 + else
48631 + return NULL;
48632 +}
48633 +
48634 +static struct acl_object_label *
48635 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48636 + const struct acl_subject_label *subj)
48637 +{
48638 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48639 + struct acl_object_label *match;
48640 +
48641 + match = subj->obj_hash[index];
48642 +
48643 + while (match && (match->inode != ino || match->device != dev ||
48644 + (match->mode & GR_DELETED))) {
48645 + match = match->next;
48646 + }
48647 +
48648 + if (match && !(match->mode & GR_DELETED))
48649 + return match;
48650 + else
48651 + return NULL;
48652 +}
48653 +
48654 +static struct acl_object_label *
48655 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48656 + const struct acl_subject_label *subj)
48657 +{
48658 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48659 + struct acl_object_label *match;
48660 +
48661 + match = subj->obj_hash[index];
48662 +
48663 + while (match && (match->inode != ino || match->device != dev ||
48664 + !(match->mode & GR_DELETED))) {
48665 + match = match->next;
48666 + }
48667 +
48668 + if (match && (match->mode & GR_DELETED))
48669 + return match;
48670 +
48671 + match = subj->obj_hash[index];
48672 +
48673 + while (match && (match->inode != ino || match->device != dev ||
48674 + (match->mode & GR_DELETED))) {
48675 + match = match->next;
48676 + }
48677 +
48678 + if (match && !(match->mode & GR_DELETED))
48679 + return match;
48680 + else
48681 + return NULL;
48682 +}
48683 +
48684 +static struct name_entry *
48685 +lookup_name_entry(const char *name)
48686 +{
48687 + unsigned int len = strlen(name);
48688 + unsigned int key = full_name_hash(name, len);
48689 + unsigned int index = key % name_set.n_size;
48690 + struct name_entry *match;
48691 +
48692 + match = name_set.n_hash[index];
48693 +
48694 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48695 + match = match->next;
48696 +
48697 + return match;
48698 +}
48699 +
48700 +static struct name_entry *
48701 +lookup_name_entry_create(const char *name)
48702 +{
48703 + unsigned int len = strlen(name);
48704 + unsigned int key = full_name_hash(name, len);
48705 + unsigned int index = key % name_set.n_size;
48706 + struct name_entry *match;
48707 +
48708 + match = name_set.n_hash[index];
48709 +
48710 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48711 + !match->deleted))
48712 + match = match->next;
48713 +
48714 + if (match && match->deleted)
48715 + return match;
48716 +
48717 + match = name_set.n_hash[index];
48718 +
48719 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48720 + match->deleted))
48721 + match = match->next;
48722 +
48723 + if (match && !match->deleted)
48724 + return match;
48725 + else
48726 + return NULL;
48727 +}
48728 +
48729 +static struct inodev_entry *
48730 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
48731 +{
48732 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
48733 + struct inodev_entry *match;
48734 +
48735 + match = inodev_set.i_hash[index];
48736 +
48737 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48738 + match = match->next;
48739 +
48740 + return match;
48741 +}
48742 +
48743 +static void
48744 +insert_inodev_entry(struct inodev_entry *entry)
48745 +{
48746 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48747 + inodev_set.i_size);
48748 + struct inodev_entry **curr;
48749 +
48750 + entry->prev = NULL;
48751 +
48752 + curr = &inodev_set.i_hash[index];
48753 + if (*curr != NULL)
48754 + (*curr)->prev = entry;
48755 +
48756 + entry->next = *curr;
48757 + *curr = entry;
48758 +
48759 + return;
48760 +}
48761 +
48762 +static void
48763 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48764 +{
48765 + unsigned int index =
48766 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48767 + struct acl_role_label **curr;
48768 + struct acl_role_label *tmp;
48769 +
48770 + curr = &acl_role_set.r_hash[index];
48771 +
48772 + /* if role was already inserted due to domains and already has
48773 + a role in the same bucket as it attached, then we need to
48774 + combine these two buckets
48775 + */
48776 + if (role->next) {
48777 + tmp = role->next;
48778 + while (tmp->next)
48779 + tmp = tmp->next;
48780 + tmp->next = *curr;
48781 + } else
48782 + role->next = *curr;
48783 + *curr = role;
48784 +
48785 + return;
48786 +}
48787 +
48788 +static void
48789 +insert_acl_role_label(struct acl_role_label *role)
48790 +{
48791 + int i;
48792 +
48793 + if (role_list == NULL) {
48794 + role_list = role;
48795 + role->prev = NULL;
48796 + } else {
48797 + role->prev = role_list;
48798 + role_list = role;
48799 + }
48800 +
48801 + /* used for hash chains */
48802 + role->next = NULL;
48803 +
48804 + if (role->roletype & GR_ROLE_DOMAIN) {
48805 + for (i = 0; i < role->domain_child_num; i++)
48806 + __insert_acl_role_label(role, role->domain_children[i]);
48807 + } else
48808 + __insert_acl_role_label(role, role->uidgid);
48809 +}
48810 +
48811 +static int
48812 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48813 +{
48814 + struct name_entry **curr, *nentry;
48815 + struct inodev_entry *ientry;
48816 + unsigned int len = strlen(name);
48817 + unsigned int key = full_name_hash(name, len);
48818 + unsigned int index = key % name_set.n_size;
48819 +
48820 + curr = &name_set.n_hash[index];
48821 +
48822 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48823 + curr = &((*curr)->next);
48824 +
48825 + if (*curr != NULL)
48826 + return 1;
48827 +
48828 + nentry = acl_alloc(sizeof (struct name_entry));
48829 + if (nentry == NULL)
48830 + return 0;
48831 + ientry = acl_alloc(sizeof (struct inodev_entry));
48832 + if (ientry == NULL)
48833 + return 0;
48834 + ientry->nentry = nentry;
48835 +
48836 + nentry->key = key;
48837 + nentry->name = name;
48838 + nentry->inode = inode;
48839 + nentry->device = device;
48840 + nentry->len = len;
48841 + nentry->deleted = deleted;
48842 +
48843 + nentry->prev = NULL;
48844 + curr = &name_set.n_hash[index];
48845 + if (*curr != NULL)
48846 + (*curr)->prev = nentry;
48847 + nentry->next = *curr;
48848 + *curr = nentry;
48849 +
48850 + /* insert us into the table searchable by inode/dev */
48851 + insert_inodev_entry(ientry);
48852 +
48853 + return 1;
48854 +}
48855 +
48856 +static void
48857 +insert_acl_obj_label(struct acl_object_label *obj,
48858 + struct acl_subject_label *subj)
48859 +{
48860 + unsigned int index =
48861 + fhash(obj->inode, obj->device, subj->obj_hash_size);
48862 + struct acl_object_label **curr;
48863 +
48864 +
48865 + obj->prev = NULL;
48866 +
48867 + curr = &subj->obj_hash[index];
48868 + if (*curr != NULL)
48869 + (*curr)->prev = obj;
48870 +
48871 + obj->next = *curr;
48872 + *curr = obj;
48873 +
48874 + return;
48875 +}
48876 +
48877 +static void
48878 +insert_acl_subj_label(struct acl_subject_label *obj,
48879 + struct acl_role_label *role)
48880 +{
48881 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48882 + struct acl_subject_label **curr;
48883 +
48884 + obj->prev = NULL;
48885 +
48886 + curr = &role->subj_hash[index];
48887 + if (*curr != NULL)
48888 + (*curr)->prev = obj;
48889 +
48890 + obj->next = *curr;
48891 + *curr = obj;
48892 +
48893 + return;
48894 +}
48895 +
48896 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48897 +
48898 +static void *
48899 +create_table(__u32 * len, int elementsize)
48900 +{
48901 + unsigned int table_sizes[] = {
48902 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48903 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48904 + 4194301, 8388593, 16777213, 33554393, 67108859
48905 + };
48906 + void *newtable = NULL;
48907 + unsigned int pwr = 0;
48908 +
48909 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48910 + table_sizes[pwr] <= *len)
48911 + pwr++;
48912 +
48913 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48914 + return newtable;
48915 +
48916 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48917 + newtable =
48918 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48919 + else
48920 + newtable = vmalloc(table_sizes[pwr] * elementsize);
48921 +
48922 + *len = table_sizes[pwr];
48923 +
48924 + return newtable;
48925 +}
48926 +
48927 +static int
48928 +init_variables(const struct gr_arg *arg)
48929 +{
48930 + struct task_struct *reaper = &init_task;
48931 + unsigned int stacksize;
48932 +
48933 + subj_map_set.s_size = arg->role_db.num_subjects;
48934 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48935 + name_set.n_size = arg->role_db.num_objects;
48936 + inodev_set.i_size = arg->role_db.num_objects;
48937 +
48938 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
48939 + !name_set.n_size || !inodev_set.i_size)
48940 + return 1;
48941 +
48942 + if (!gr_init_uidset())
48943 + return 1;
48944 +
48945 + /* set up the stack that holds allocation info */
48946 +
48947 + stacksize = arg->role_db.num_pointers + 5;
48948 +
48949 + if (!acl_alloc_stack_init(stacksize))
48950 + return 1;
48951 +
48952 + /* grab reference for the real root dentry and vfsmount */
48953 + get_fs_root(reaper->fs, &real_root);
48954 +
48955 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48956 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48957 +#endif
48958 +
48959 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48960 + if (fakefs_obj_rw == NULL)
48961 + return 1;
48962 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48963 +
48964 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48965 + if (fakefs_obj_rwx == NULL)
48966 + return 1;
48967 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48968 +
48969 + subj_map_set.s_hash =
48970 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48971 + acl_role_set.r_hash =
48972 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48973 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48974 + inodev_set.i_hash =
48975 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48976 +
48977 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48978 + !name_set.n_hash || !inodev_set.i_hash)
48979 + return 1;
48980 +
48981 + memset(subj_map_set.s_hash, 0,
48982 + sizeof(struct subject_map *) * subj_map_set.s_size);
48983 + memset(acl_role_set.r_hash, 0,
48984 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
48985 + memset(name_set.n_hash, 0,
48986 + sizeof (struct name_entry *) * name_set.n_size);
48987 + memset(inodev_set.i_hash, 0,
48988 + sizeof (struct inodev_entry *) * inodev_set.i_size);
48989 +
48990 + return 0;
48991 +}
48992 +
48993 +/* free information not needed after startup
48994 + currently contains user->kernel pointer mappings for subjects
48995 +*/
48996 +
48997 +static void
48998 +free_init_variables(void)
48999 +{
49000 + __u32 i;
49001 +
49002 + if (subj_map_set.s_hash) {
49003 + for (i = 0; i < subj_map_set.s_size; i++) {
49004 + if (subj_map_set.s_hash[i]) {
49005 + kfree(subj_map_set.s_hash[i]);
49006 + subj_map_set.s_hash[i] = NULL;
49007 + }
49008 + }
49009 +
49010 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49011 + PAGE_SIZE)
49012 + kfree(subj_map_set.s_hash);
49013 + else
49014 + vfree(subj_map_set.s_hash);
49015 + }
49016 +
49017 + return;
49018 +}
49019 +
49020 +static void
49021 +free_variables(void)
49022 +{
49023 + struct acl_subject_label *s;
49024 + struct acl_role_label *r;
49025 + struct task_struct *task, *task2;
49026 + unsigned int x;
49027 +
49028 + gr_clear_learn_entries();
49029 +
49030 + read_lock(&tasklist_lock);
49031 + do_each_thread(task2, task) {
49032 + task->acl_sp_role = 0;
49033 + task->acl_role_id = 0;
49034 + task->acl = NULL;
49035 + task->role = NULL;
49036 + } while_each_thread(task2, task);
49037 + read_unlock(&tasklist_lock);
49038 +
49039 + /* release the reference to the real root dentry and vfsmount */
49040 + path_put(&real_root);
49041 +
49042 + /* free all object hash tables */
49043 +
49044 + FOR_EACH_ROLE_START(r)
49045 + if (r->subj_hash == NULL)
49046 + goto next_role;
49047 + FOR_EACH_SUBJECT_START(r, s, x)
49048 + if (s->obj_hash == NULL)
49049 + break;
49050 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49051 + kfree(s->obj_hash);
49052 + else
49053 + vfree(s->obj_hash);
49054 + FOR_EACH_SUBJECT_END(s, x)
49055 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49056 + if (s->obj_hash == NULL)
49057 + break;
49058 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49059 + kfree(s->obj_hash);
49060 + else
49061 + vfree(s->obj_hash);
49062 + FOR_EACH_NESTED_SUBJECT_END(s)
49063 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49064 + kfree(r->subj_hash);
49065 + else
49066 + vfree(r->subj_hash);
49067 + r->subj_hash = NULL;
49068 +next_role:
49069 + FOR_EACH_ROLE_END(r)
49070 +
49071 + acl_free_all();
49072 +
49073 + if (acl_role_set.r_hash) {
49074 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49075 + PAGE_SIZE)
49076 + kfree(acl_role_set.r_hash);
49077 + else
49078 + vfree(acl_role_set.r_hash);
49079 + }
49080 + if (name_set.n_hash) {
49081 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49082 + PAGE_SIZE)
49083 + kfree(name_set.n_hash);
49084 + else
49085 + vfree(name_set.n_hash);
49086 + }
49087 +
49088 + if (inodev_set.i_hash) {
49089 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49090 + PAGE_SIZE)
49091 + kfree(inodev_set.i_hash);
49092 + else
49093 + vfree(inodev_set.i_hash);
49094 + }
49095 +
49096 + gr_free_uidset();
49097 +
49098 + memset(&name_set, 0, sizeof (struct name_db));
49099 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49100 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49101 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49102 +
49103 + default_role = NULL;
49104 + role_list = NULL;
49105 +
49106 + return;
49107 +}
49108 +
49109 +static __u32
49110 +count_user_objs(struct acl_object_label *userp)
49111 +{
49112 + struct acl_object_label o_tmp;
49113 + __u32 num = 0;
49114 +
49115 + while (userp) {
49116 + if (copy_from_user(&o_tmp, userp,
49117 + sizeof (struct acl_object_label)))
49118 + break;
49119 +
49120 + userp = o_tmp.prev;
49121 + num++;
49122 + }
49123 +
49124 + return num;
49125 +}
49126 +
49127 +static struct acl_subject_label *
49128 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49129 +
49130 +static int
49131 +copy_user_glob(struct acl_object_label *obj)
49132 +{
49133 + struct acl_object_label *g_tmp, **guser;
49134 + unsigned int len;
49135 + char *tmp;
49136 +
49137 + if (obj->globbed == NULL)
49138 + return 0;
49139 +
49140 + guser = &obj->globbed;
49141 + while (*guser) {
49142 + g_tmp = (struct acl_object_label *)
49143 + acl_alloc(sizeof (struct acl_object_label));
49144 + if (g_tmp == NULL)
49145 + return -ENOMEM;
49146 +
49147 + if (copy_from_user(g_tmp, *guser,
49148 + sizeof (struct acl_object_label)))
49149 + return -EFAULT;
49150 +
49151 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49152 +
49153 + if (!len || len >= PATH_MAX)
49154 + return -EINVAL;
49155 +
49156 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49157 + return -ENOMEM;
49158 +
49159 + if (copy_from_user(tmp, g_tmp->filename, len))
49160 + return -EFAULT;
49161 + tmp[len-1] = '\0';
49162 + g_tmp->filename = tmp;
49163 +
49164 + *guser = g_tmp;
49165 + guser = &(g_tmp->next);
49166 + }
49167 +
49168 + return 0;
49169 +}
49170 +
49171 +static int
49172 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49173 + struct acl_role_label *role)
49174 +{
49175 + struct acl_object_label *o_tmp;
49176 + unsigned int len;
49177 + int ret;
49178 + char *tmp;
49179 +
49180 + while (userp) {
49181 + if ((o_tmp = (struct acl_object_label *)
49182 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49183 + return -ENOMEM;
49184 +
49185 + if (copy_from_user(o_tmp, userp,
49186 + sizeof (struct acl_object_label)))
49187 + return -EFAULT;
49188 +
49189 + userp = o_tmp->prev;
49190 +
49191 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49192 +
49193 + if (!len || len >= PATH_MAX)
49194 + return -EINVAL;
49195 +
49196 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49197 + return -ENOMEM;
49198 +
49199 + if (copy_from_user(tmp, o_tmp->filename, len))
49200 + return -EFAULT;
49201 + tmp[len-1] = '\0';
49202 + o_tmp->filename = tmp;
49203 +
49204 + insert_acl_obj_label(o_tmp, subj);
49205 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49206 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49207 + return -ENOMEM;
49208 +
49209 + ret = copy_user_glob(o_tmp);
49210 + if (ret)
49211 + return ret;
49212 +
49213 + if (o_tmp->nested) {
49214 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49215 + if (IS_ERR(o_tmp->nested))
49216 + return PTR_ERR(o_tmp->nested);
49217 +
49218 + /* insert into nested subject list */
49219 + o_tmp->nested->next = role->hash->first;
49220 + role->hash->first = o_tmp->nested;
49221 + }
49222 + }
49223 +
49224 + return 0;
49225 +}
49226 +
49227 +static __u32
49228 +count_user_subjs(struct acl_subject_label *userp)
49229 +{
49230 + struct acl_subject_label s_tmp;
49231 + __u32 num = 0;
49232 +
49233 + while (userp) {
49234 + if (copy_from_user(&s_tmp, userp,
49235 + sizeof (struct acl_subject_label)))
49236 + break;
49237 +
49238 + userp = s_tmp.prev;
49239 + /* do not count nested subjects against this count, since
49240 + they are not included in the hash table, but are
49241 + attached to objects. We have already counted
49242 + the subjects in userspace for the allocation
49243 + stack
49244 + */
49245 + if (!(s_tmp.mode & GR_NESTED))
49246 + num++;
49247 + }
49248 +
49249 + return num;
49250 +}
49251 +
49252 +static int
49253 +copy_user_allowedips(struct acl_role_label *rolep)
49254 +{
49255 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49256 +
49257 + ruserip = rolep->allowed_ips;
49258 +
49259 + while (ruserip) {
49260 + rlast = rtmp;
49261 +
49262 + if ((rtmp = (struct role_allowed_ip *)
49263 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49264 + return -ENOMEM;
49265 +
49266 + if (copy_from_user(rtmp, ruserip,
49267 + sizeof (struct role_allowed_ip)))
49268 + return -EFAULT;
49269 +
49270 + ruserip = rtmp->prev;
49271 +
49272 + if (!rlast) {
49273 + rtmp->prev = NULL;
49274 + rolep->allowed_ips = rtmp;
49275 + } else {
49276 + rlast->next = rtmp;
49277 + rtmp->prev = rlast;
49278 + }
49279 +
49280 + if (!ruserip)
49281 + rtmp->next = NULL;
49282 + }
49283 +
49284 + return 0;
49285 +}
49286 +
49287 +static int
49288 +copy_user_transitions(struct acl_role_label *rolep)
49289 +{
49290 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49291 +
49292 + unsigned int len;
49293 + char *tmp;
49294 +
49295 + rusertp = rolep->transitions;
49296 +
49297 + while (rusertp) {
49298 + rlast = rtmp;
49299 +
49300 + if ((rtmp = (struct role_transition *)
49301 + acl_alloc(sizeof (struct role_transition))) == NULL)
49302 + return -ENOMEM;
49303 +
49304 + if (copy_from_user(rtmp, rusertp,
49305 + sizeof (struct role_transition)))
49306 + return -EFAULT;
49307 +
49308 + rusertp = rtmp->prev;
49309 +
49310 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49311 +
49312 + if (!len || len >= GR_SPROLE_LEN)
49313 + return -EINVAL;
49314 +
49315 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49316 + return -ENOMEM;
49317 +
49318 + if (copy_from_user(tmp, rtmp->rolename, len))
49319 + return -EFAULT;
49320 + tmp[len-1] = '\0';
49321 + rtmp->rolename = tmp;
49322 +
49323 + if (!rlast) {
49324 + rtmp->prev = NULL;
49325 + rolep->transitions = rtmp;
49326 + } else {
49327 + rlast->next = rtmp;
49328 + rtmp->prev = rlast;
49329 + }
49330 +
49331 + if (!rusertp)
49332 + rtmp->next = NULL;
49333 + }
49334 +
49335 + return 0;
49336 +}
49337 +
49338 +static struct acl_subject_label *
49339 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49340 +{
49341 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49342 + unsigned int len;
49343 + char *tmp;
49344 + __u32 num_objs;
49345 + struct acl_ip_label **i_tmp, *i_utmp2;
49346 + struct gr_hash_struct ghash;
49347 + struct subject_map *subjmap;
49348 + unsigned int i_num;
49349 + int err;
49350 +
49351 + s_tmp = lookup_subject_map(userp);
49352 +
49353 + /* we've already copied this subject into the kernel, just return
49354 + the reference to it, and don't copy it over again
49355 + */
49356 + if (s_tmp)
49357 + return(s_tmp);
49358 +
49359 + if ((s_tmp = (struct acl_subject_label *)
49360 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49361 + return ERR_PTR(-ENOMEM);
49362 +
49363 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49364 + if (subjmap == NULL)
49365 + return ERR_PTR(-ENOMEM);
49366 +
49367 + subjmap->user = userp;
49368 + subjmap->kernel = s_tmp;
49369 + insert_subj_map_entry(subjmap);
49370 +
49371 + if (copy_from_user(s_tmp, userp,
49372 + sizeof (struct acl_subject_label)))
49373 + return ERR_PTR(-EFAULT);
49374 +
49375 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49376 +
49377 + if (!len || len >= PATH_MAX)
49378 + return ERR_PTR(-EINVAL);
49379 +
49380 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49381 + return ERR_PTR(-ENOMEM);
49382 +
49383 + if (copy_from_user(tmp, s_tmp->filename, len))
49384 + return ERR_PTR(-EFAULT);
49385 + tmp[len-1] = '\0';
49386 + s_tmp->filename = tmp;
49387 +
49388 + if (!strcmp(s_tmp->filename, "/"))
49389 + role->root_label = s_tmp;
49390 +
49391 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49392 + return ERR_PTR(-EFAULT);
49393 +
49394 + /* copy user and group transition tables */
49395 +
49396 + if (s_tmp->user_trans_num) {
49397 + uid_t *uidlist;
49398 +
49399 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49400 + if (uidlist == NULL)
49401 + return ERR_PTR(-ENOMEM);
49402 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49403 + return ERR_PTR(-EFAULT);
49404 +
49405 + s_tmp->user_transitions = uidlist;
49406 + }
49407 +
49408 + if (s_tmp->group_trans_num) {
49409 + gid_t *gidlist;
49410 +
49411 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49412 + if (gidlist == NULL)
49413 + return ERR_PTR(-ENOMEM);
49414 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49415 + return ERR_PTR(-EFAULT);
49416 +
49417 + s_tmp->group_transitions = gidlist;
49418 + }
49419 +
49420 + /* set up object hash table */
49421 + num_objs = count_user_objs(ghash.first);
49422 +
49423 + s_tmp->obj_hash_size = num_objs;
49424 + s_tmp->obj_hash =
49425 + (struct acl_object_label **)
49426 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49427 +
49428 + if (!s_tmp->obj_hash)
49429 + return ERR_PTR(-ENOMEM);
49430 +
49431 + memset(s_tmp->obj_hash, 0,
49432 + s_tmp->obj_hash_size *
49433 + sizeof (struct acl_object_label *));
49434 +
49435 + /* add in objects */
49436 + err = copy_user_objs(ghash.first, s_tmp, role);
49437 +
49438 + if (err)
49439 + return ERR_PTR(err);
49440 +
49441 + /* set pointer for parent subject */
49442 + if (s_tmp->parent_subject) {
49443 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49444 +
49445 + if (IS_ERR(s_tmp2))
49446 + return s_tmp2;
49447 +
49448 + s_tmp->parent_subject = s_tmp2;
49449 + }
49450 +
49451 + /* add in ip acls */
49452 +
49453 + if (!s_tmp->ip_num) {
49454 + s_tmp->ips = NULL;
49455 + goto insert;
49456 + }
49457 +
49458 + i_tmp =
49459 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49460 + sizeof (struct acl_ip_label *));
49461 +
49462 + if (!i_tmp)
49463 + return ERR_PTR(-ENOMEM);
49464 +
49465 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49466 + *(i_tmp + i_num) =
49467 + (struct acl_ip_label *)
49468 + acl_alloc(sizeof (struct acl_ip_label));
49469 + if (!*(i_tmp + i_num))
49470 + return ERR_PTR(-ENOMEM);
49471 +
49472 + if (copy_from_user
49473 + (&i_utmp2, s_tmp->ips + i_num,
49474 + sizeof (struct acl_ip_label *)))
49475 + return ERR_PTR(-EFAULT);
49476 +
49477 + if (copy_from_user
49478 + (*(i_tmp + i_num), i_utmp2,
49479 + sizeof (struct acl_ip_label)))
49480 + return ERR_PTR(-EFAULT);
49481 +
49482 + if ((*(i_tmp + i_num))->iface == NULL)
49483 + continue;
49484 +
49485 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49486 + if (!len || len >= IFNAMSIZ)
49487 + return ERR_PTR(-EINVAL);
49488 + tmp = acl_alloc(len);
49489 + if (tmp == NULL)
49490 + return ERR_PTR(-ENOMEM);
49491 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49492 + return ERR_PTR(-EFAULT);
49493 + (*(i_tmp + i_num))->iface = tmp;
49494 + }
49495 +
49496 + s_tmp->ips = i_tmp;
49497 +
49498 +insert:
49499 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49500 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49501 + return ERR_PTR(-ENOMEM);
49502 +
49503 + return s_tmp;
49504 +}
49505 +
49506 +static int
49507 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49508 +{
49509 + struct acl_subject_label s_pre;
49510 + struct acl_subject_label * ret;
49511 + int err;
49512 +
49513 + while (userp) {
49514 + if (copy_from_user(&s_pre, userp,
49515 + sizeof (struct acl_subject_label)))
49516 + return -EFAULT;
49517 +
49518 + /* do not add nested subjects here, add
49519 + while parsing objects
49520 + */
49521 +
49522 + if (s_pre.mode & GR_NESTED) {
49523 + userp = s_pre.prev;
49524 + continue;
49525 + }
49526 +
49527 + ret = do_copy_user_subj(userp, role);
49528 +
49529 + err = PTR_ERR(ret);
49530 + if (IS_ERR(ret))
49531 + return err;
49532 +
49533 + insert_acl_subj_label(ret, role);
49534 +
49535 + userp = s_pre.prev;
49536 + }
49537 +
49538 + return 0;
49539 +}
49540 +
49541 +static int
49542 +copy_user_acl(struct gr_arg *arg)
49543 +{
49544 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49545 + struct sprole_pw *sptmp;
49546 + struct gr_hash_struct *ghash;
49547 + uid_t *domainlist;
49548 + unsigned int r_num;
49549 + unsigned int len;
49550 + char *tmp;
49551 + int err = 0;
49552 + __u16 i;
49553 + __u32 num_subjs;
49554 +
49555 + /* we need a default and kernel role */
49556 + if (arg->role_db.num_roles < 2)
49557 + return -EINVAL;
49558 +
49559 + /* copy special role authentication info from userspace */
49560 +
49561 + num_sprole_pws = arg->num_sprole_pws;
49562 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49563 +
49564 + if (!acl_special_roles) {
49565 + err = -ENOMEM;
49566 + goto cleanup;
49567 + }
49568 +
49569 + for (i = 0; i < num_sprole_pws; i++) {
49570 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49571 + if (!sptmp) {
49572 + err = -ENOMEM;
49573 + goto cleanup;
49574 + }
49575 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49576 + sizeof (struct sprole_pw))) {
49577 + err = -EFAULT;
49578 + goto cleanup;
49579 + }
49580 +
49581 + len =
49582 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49583 +
49584 + if (!len || len >= GR_SPROLE_LEN) {
49585 + err = -EINVAL;
49586 + goto cleanup;
49587 + }
49588 +
49589 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49590 + err = -ENOMEM;
49591 + goto cleanup;
49592 + }
49593 +
49594 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49595 + err = -EFAULT;
49596 + goto cleanup;
49597 + }
49598 + tmp[len-1] = '\0';
49599 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49600 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49601 +#endif
49602 + sptmp->rolename = tmp;
49603 + acl_special_roles[i] = sptmp;
49604 + }
49605 +
49606 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49607 +
49608 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49609 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49610 +
49611 + if (!r_tmp) {
49612 + err = -ENOMEM;
49613 + goto cleanup;
49614 + }
49615 +
49616 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49617 + sizeof (struct acl_role_label *))) {
49618 + err = -EFAULT;
49619 + goto cleanup;
49620 + }
49621 +
49622 + if (copy_from_user(r_tmp, r_utmp2,
49623 + sizeof (struct acl_role_label))) {
49624 + err = -EFAULT;
49625 + goto cleanup;
49626 + }
49627 +
49628 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49629 +
49630 + if (!len || len >= PATH_MAX) {
49631 + err = -EINVAL;
49632 + goto cleanup;
49633 + }
49634 +
49635 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49636 + err = -ENOMEM;
49637 + goto cleanup;
49638 + }
49639 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
49640 + err = -EFAULT;
49641 + goto cleanup;
49642 + }
49643 + tmp[len-1] = '\0';
49644 + r_tmp->rolename = tmp;
49645 +
49646 + if (!strcmp(r_tmp->rolename, "default")
49647 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49648 + default_role = r_tmp;
49649 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49650 + kernel_role = r_tmp;
49651 + }
49652 +
49653 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49654 + err = -ENOMEM;
49655 + goto cleanup;
49656 + }
49657 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49658 + err = -EFAULT;
49659 + goto cleanup;
49660 + }
49661 +
49662 + r_tmp->hash = ghash;
49663 +
49664 + num_subjs = count_user_subjs(r_tmp->hash->first);
49665 +
49666 + r_tmp->subj_hash_size = num_subjs;
49667 + r_tmp->subj_hash =
49668 + (struct acl_subject_label **)
49669 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49670 +
49671 + if (!r_tmp->subj_hash) {
49672 + err = -ENOMEM;
49673 + goto cleanup;
49674 + }
49675 +
49676 + err = copy_user_allowedips(r_tmp);
49677 + if (err)
49678 + goto cleanup;
49679 +
49680 + /* copy domain info */
49681 + if (r_tmp->domain_children != NULL) {
49682 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49683 + if (domainlist == NULL) {
49684 + err = -ENOMEM;
49685 + goto cleanup;
49686 + }
49687 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49688 + err = -EFAULT;
49689 + goto cleanup;
49690 + }
49691 + r_tmp->domain_children = domainlist;
49692 + }
49693 +
49694 + err = copy_user_transitions(r_tmp);
49695 + if (err)
49696 + goto cleanup;
49697 +
49698 + memset(r_tmp->subj_hash, 0,
49699 + r_tmp->subj_hash_size *
49700 + sizeof (struct acl_subject_label *));
49701 +
49702 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49703 +
49704 + if (err)
49705 + goto cleanup;
49706 +
49707 + /* set nested subject list to null */
49708 + r_tmp->hash->first = NULL;
49709 +
49710 + insert_acl_role_label(r_tmp);
49711 + }
49712 +
49713 + goto return_err;
49714 + cleanup:
49715 + free_variables();
49716 + return_err:
49717 + return err;
49718 +
49719 +}
49720 +
49721 +static int
49722 +gracl_init(struct gr_arg *args)
49723 +{
49724 + int error = 0;
49725 +
49726 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49727 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49728 +
49729 + if (init_variables(args)) {
49730 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49731 + error = -ENOMEM;
49732 + free_variables();
49733 + goto out;
49734 + }
49735 +
49736 + error = copy_user_acl(args);
49737 + free_init_variables();
49738 + if (error) {
49739 + free_variables();
49740 + goto out;
49741 + }
49742 +
49743 + if ((error = gr_set_acls(0))) {
49744 + free_variables();
49745 + goto out;
49746 + }
49747 +
49748 + pax_open_kernel();
49749 + gr_status |= GR_READY;
49750 + pax_close_kernel();
49751 +
49752 + out:
49753 + return error;
49754 +}
49755 +
49756 +/* derived from glibc fnmatch() 0: match, 1: no match*/
49757 +
49758 +static int
49759 +glob_match(const char *p, const char *n)
49760 +{
49761 + char c;
49762 +
49763 + while ((c = *p++) != '\0') {
49764 + switch (c) {
49765 + case '?':
49766 + if (*n == '\0')
49767 + return 1;
49768 + else if (*n == '/')
49769 + return 1;
49770 + break;
49771 + case '\\':
49772 + if (*n != c)
49773 + return 1;
49774 + break;
49775 + case '*':
49776 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
49777 + if (*n == '/')
49778 + return 1;
49779 + else if (c == '?') {
49780 + if (*n == '\0')
49781 + return 1;
49782 + else
49783 + ++n;
49784 + }
49785 + }
49786 + if (c == '\0') {
49787 + return 0;
49788 + } else {
49789 + const char *endp;
49790 +
49791 + if ((endp = strchr(n, '/')) == NULL)
49792 + endp = n + strlen(n);
49793 +
49794 + if (c == '[') {
49795 + for (--p; n < endp; ++n)
49796 + if (!glob_match(p, n))
49797 + return 0;
49798 + } else if (c == '/') {
49799 + while (*n != '\0' && *n != '/')
49800 + ++n;
49801 + if (*n == '/' && !glob_match(p, n + 1))
49802 + return 0;
49803 + } else {
49804 + for (--p; n < endp; ++n)
49805 + if (*n == c && !glob_match(p, n))
49806 + return 0;
49807 + }
49808 +
49809 + return 1;
49810 + }
49811 + case '[':
49812 + {
49813 + int not;
49814 + char cold;
49815 +
49816 + if (*n == '\0' || *n == '/')
49817 + return 1;
49818 +
49819 + not = (*p == '!' || *p == '^');
49820 + if (not)
49821 + ++p;
49822 +
49823 + c = *p++;
49824 + for (;;) {
49825 + unsigned char fn = (unsigned char)*n;
49826 +
49827 + if (c == '\0')
49828 + return 1;
49829 + else {
49830 + if (c == fn)
49831 + goto matched;
49832 + cold = c;
49833 + c = *p++;
49834 +
49835 + if (c == '-' && *p != ']') {
49836 + unsigned char cend = *p++;
49837 +
49838 + if (cend == '\0')
49839 + return 1;
49840 +
49841 + if (cold <= fn && fn <= cend)
49842 + goto matched;
49843 +
49844 + c = *p++;
49845 + }
49846 + }
49847 +
49848 + if (c == ']')
49849 + break;
49850 + }
49851 + if (!not)
49852 + return 1;
49853 + break;
49854 + matched:
49855 + while (c != ']') {
49856 + if (c == '\0')
49857 + return 1;
49858 +
49859 + c = *p++;
49860 + }
49861 + if (not)
49862 + return 1;
49863 + }
49864 + break;
49865 + default:
49866 + if (c != *n)
49867 + return 1;
49868 + }
49869 +
49870 + ++n;
49871 + }
49872 +
49873 + if (*n == '\0')
49874 + return 0;
49875 +
49876 + if (*n == '/')
49877 + return 0;
49878 +
49879 + return 1;
49880 +}
49881 +
49882 +static struct acl_object_label *
49883 +chk_glob_label(struct acl_object_label *globbed,
49884 + struct dentry *dentry, struct vfsmount *mnt, char **path)
49885 +{
49886 + struct acl_object_label *tmp;
49887 +
49888 + if (*path == NULL)
49889 + *path = gr_to_filename_nolock(dentry, mnt);
49890 +
49891 + tmp = globbed;
49892 +
49893 + while (tmp) {
49894 + if (!glob_match(tmp->filename, *path))
49895 + return tmp;
49896 + tmp = tmp->next;
49897 + }
49898 +
49899 + return NULL;
49900 +}
49901 +
49902 +static struct acl_object_label *
49903 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49904 + const ino_t curr_ino, const dev_t curr_dev,
49905 + const struct acl_subject_label *subj, char **path, const int checkglob)
49906 +{
49907 + struct acl_subject_label *tmpsubj;
49908 + struct acl_object_label *retval;
49909 + struct acl_object_label *retval2;
49910 +
49911 + tmpsubj = (struct acl_subject_label *) subj;
49912 + read_lock(&gr_inode_lock);
49913 + do {
49914 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49915 + if (retval) {
49916 + if (checkglob && retval->globbed) {
49917 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49918 + (struct vfsmount *)orig_mnt, path);
49919 + if (retval2)
49920 + retval = retval2;
49921 + }
49922 + break;
49923 + }
49924 + } while ((tmpsubj = tmpsubj->parent_subject));
49925 + read_unlock(&gr_inode_lock);
49926 +
49927 + return retval;
49928 +}
49929 +
49930 +static __inline__ struct acl_object_label *
49931 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49932 + struct dentry *curr_dentry,
49933 + const struct acl_subject_label *subj, char **path, const int checkglob)
49934 +{
49935 + int newglob = checkglob;
49936 + ino_t inode;
49937 + dev_t device;
49938 +
49939 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49940 + as we don't want a / * rule to match instead of the / object
49941 + don't do this for create lookups that call this function though, since they're looking up
49942 + on the parent and thus need globbing checks on all paths
49943 + */
49944 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49945 + newglob = GR_NO_GLOB;
49946 +
49947 + spin_lock(&curr_dentry->d_lock);
49948 + inode = curr_dentry->d_inode->i_ino;
49949 + device = __get_dev(curr_dentry);
49950 + spin_unlock(&curr_dentry->d_lock);
49951 +
49952 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49953 +}
49954 +
49955 +static struct acl_object_label *
49956 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49957 + const struct acl_subject_label *subj, char *path, const int checkglob)
49958 +{
49959 + struct dentry *dentry = (struct dentry *) l_dentry;
49960 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49961 + struct acl_object_label *retval;
49962 + struct dentry *parent;
49963 +
49964 + write_seqlock(&rename_lock);
49965 + br_read_lock(vfsmount_lock);
49966 +
49967 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49968 +#ifdef CONFIG_NET
49969 + mnt == sock_mnt ||
49970 +#endif
49971 +#ifdef CONFIG_HUGETLBFS
49972 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49973 +#endif
49974 + /* ignore Eric Biederman */
49975 + IS_PRIVATE(l_dentry->d_inode))) {
49976 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49977 + goto out;
49978 + }
49979 +
49980 + for (;;) {
49981 + if (dentry == real_root.dentry && mnt == real_root.mnt)
49982 + break;
49983 +
49984 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49985 + if (mnt->mnt_parent == mnt)
49986 + break;
49987 +
49988 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49989 + if (retval != NULL)
49990 + goto out;
49991 +
49992 + dentry = mnt->mnt_mountpoint;
49993 + mnt = mnt->mnt_parent;
49994 + continue;
49995 + }
49996 +
49997 + parent = dentry->d_parent;
49998 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49999 + if (retval != NULL)
50000 + goto out;
50001 +
50002 + dentry = parent;
50003 + }
50004 +
50005 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50006 +
50007 + /* real_root is pinned so we don't have to hold a reference */
50008 + if (retval == NULL)
50009 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50010 +out:
50011 + br_read_unlock(vfsmount_lock);
50012 + write_sequnlock(&rename_lock);
50013 +
50014 + BUG_ON(retval == NULL);
50015 +
50016 + return retval;
50017 +}
50018 +
50019 +static __inline__ struct acl_object_label *
50020 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50021 + const struct acl_subject_label *subj)
50022 +{
50023 + char *path = NULL;
50024 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50025 +}
50026 +
50027 +static __inline__ struct acl_object_label *
50028 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50029 + const struct acl_subject_label *subj)
50030 +{
50031 + char *path = NULL;
50032 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50033 +}
50034 +
50035 +static __inline__ struct acl_object_label *
50036 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50037 + const struct acl_subject_label *subj, char *path)
50038 +{
50039 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50040 +}
50041 +
50042 +static struct acl_subject_label *
50043 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50044 + const struct acl_role_label *role)
50045 +{
50046 + struct dentry *dentry = (struct dentry *) l_dentry;
50047 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50048 + struct acl_subject_label *retval;
50049 + struct dentry *parent;
50050 +
50051 + write_seqlock(&rename_lock);
50052 + br_read_lock(vfsmount_lock);
50053 +
50054 + for (;;) {
50055 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50056 + break;
50057 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50058 + if (mnt->mnt_parent == mnt)
50059 + break;
50060 +
50061 + spin_lock(&dentry->d_lock);
50062 + read_lock(&gr_inode_lock);
50063 + retval =
50064 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50065 + __get_dev(dentry), role);
50066 + read_unlock(&gr_inode_lock);
50067 + spin_unlock(&dentry->d_lock);
50068 + if (retval != NULL)
50069 + goto out;
50070 +
50071 + dentry = mnt->mnt_mountpoint;
50072 + mnt = mnt->mnt_parent;
50073 + continue;
50074 + }
50075 +
50076 + spin_lock(&dentry->d_lock);
50077 + read_lock(&gr_inode_lock);
50078 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50079 + __get_dev(dentry), role);
50080 + read_unlock(&gr_inode_lock);
50081 + parent = dentry->d_parent;
50082 + spin_unlock(&dentry->d_lock);
50083 +
50084 + if (retval != NULL)
50085 + goto out;
50086 +
50087 + dentry = parent;
50088 + }
50089 +
50090 + spin_lock(&dentry->d_lock);
50091 + read_lock(&gr_inode_lock);
50092 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50093 + __get_dev(dentry), role);
50094 + read_unlock(&gr_inode_lock);
50095 + spin_unlock(&dentry->d_lock);
50096 +
50097 + if (unlikely(retval == NULL)) {
50098 + /* real_root is pinned, we don't need to hold a reference */
50099 + read_lock(&gr_inode_lock);
50100 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50101 + __get_dev(real_root.dentry), role);
50102 + read_unlock(&gr_inode_lock);
50103 + }
50104 +out:
50105 + br_read_unlock(vfsmount_lock);
50106 + write_sequnlock(&rename_lock);
50107 +
50108 + BUG_ON(retval == NULL);
50109 +
50110 + return retval;
50111 +}
50112 +
50113 +static void
50114 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50115 +{
50116 + struct task_struct *task = current;
50117 + const struct cred *cred = current_cred();
50118 +
50119 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50120 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50121 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50122 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50123 +
50124 + return;
50125 +}
50126 +
50127 +static void
50128 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50129 +{
50130 + struct task_struct *task = current;
50131 + const struct cred *cred = current_cred();
50132 +
50133 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50134 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50135 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50136 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50137 +
50138 + return;
50139 +}
50140 +
50141 +static void
50142 +gr_log_learn_id_change(const char type, const unsigned int real,
50143 + const unsigned int effective, const unsigned int fs)
50144 +{
50145 + struct task_struct *task = current;
50146 + const struct cred *cred = current_cred();
50147 +
50148 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50149 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50150 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50151 + type, real, effective, fs, &task->signal->saved_ip);
50152 +
50153 + return;
50154 +}
50155 +
50156 +__u32
50157 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50158 + const struct vfsmount * mnt)
50159 +{
50160 + __u32 retval = mode;
50161 + struct acl_subject_label *curracl;
50162 + struct acl_object_label *currobj;
50163 +
50164 + if (unlikely(!(gr_status & GR_READY)))
50165 + return (mode & ~GR_AUDITS);
50166 +
50167 + curracl = current->acl;
50168 +
50169 + currobj = chk_obj_label(dentry, mnt, curracl);
50170 + retval = currobj->mode & mode;
50171 +
50172 + /* if we're opening a specified transfer file for writing
50173 + (e.g. /dev/initctl), then transfer our role to init
50174 + */
50175 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50176 + current->role->roletype & GR_ROLE_PERSIST)) {
50177 + struct task_struct *task = init_pid_ns.child_reaper;
50178 +
50179 + if (task->role != current->role) {
50180 + task->acl_sp_role = 0;
50181 + task->acl_role_id = current->acl_role_id;
50182 + task->role = current->role;
50183 + rcu_read_lock();
50184 + read_lock(&grsec_exec_file_lock);
50185 + gr_apply_subject_to_task(task);
50186 + read_unlock(&grsec_exec_file_lock);
50187 + rcu_read_unlock();
50188 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50189 + }
50190 + }
50191 +
50192 + if (unlikely
50193 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50194 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50195 + __u32 new_mode = mode;
50196 +
50197 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50198 +
50199 + retval = new_mode;
50200 +
50201 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50202 + new_mode |= GR_INHERIT;
50203 +
50204 + if (!(mode & GR_NOLEARN))
50205 + gr_log_learn(dentry, mnt, new_mode);
50206 + }
50207 +
50208 + return retval;
50209 +}
50210 +
50211 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50212 + const struct dentry *parent,
50213 + const struct vfsmount *mnt)
50214 +{
50215 + struct name_entry *match;
50216 + struct acl_object_label *matchpo;
50217 + struct acl_subject_label *curracl;
50218 + char *path;
50219 +
50220 + if (unlikely(!(gr_status & GR_READY)))
50221 + return NULL;
50222 +
50223 + preempt_disable();
50224 + path = gr_to_filename_rbac(new_dentry, mnt);
50225 + match = lookup_name_entry_create(path);
50226 +
50227 + curracl = current->acl;
50228 +
50229 + if (match) {
50230 + read_lock(&gr_inode_lock);
50231 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50232 + read_unlock(&gr_inode_lock);
50233 +
50234 + if (matchpo) {
50235 + preempt_enable();
50236 + return matchpo;
50237 + }
50238 + }
50239 +
50240 + // lookup parent
50241 +
50242 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50243 +
50244 + preempt_enable();
50245 + return matchpo;
50246 +}
50247 +
50248 +__u32
50249 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50250 + const struct vfsmount * mnt, const __u32 mode)
50251 +{
50252 + struct acl_object_label *matchpo;
50253 + __u32 retval;
50254 +
50255 + if (unlikely(!(gr_status & GR_READY)))
50256 + return (mode & ~GR_AUDITS);
50257 +
50258 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50259 +
50260 + retval = matchpo->mode & mode;
50261 +
50262 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50263 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50264 + __u32 new_mode = mode;
50265 +
50266 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50267 +
50268 + gr_log_learn(new_dentry, mnt, new_mode);
50269 + return new_mode;
50270 + }
50271 +
50272 + return retval;
50273 +}
50274 +
50275 +__u32
50276 +gr_check_link(const struct dentry * new_dentry,
50277 + const struct dentry * parent_dentry,
50278 + const struct vfsmount * parent_mnt,
50279 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50280 +{
50281 + struct acl_object_label *obj;
50282 + __u32 oldmode, newmode;
50283 + __u32 needmode;
50284 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50285 + GR_DELETE | GR_INHERIT;
50286 +
50287 + if (unlikely(!(gr_status & GR_READY)))
50288 + return (GR_CREATE | GR_LINK);
50289 +
50290 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50291 + oldmode = obj->mode;
50292 +
50293 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50294 + newmode = obj->mode;
50295 +
50296 + needmode = newmode & checkmodes;
50297 +
50298 + // old name for hardlink must have at least the permissions of the new name
50299 + if ((oldmode & needmode) != needmode)
50300 + goto bad;
50301 +
50302 + // if old name had restrictions/auditing, make sure the new name does as well
50303 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50304 +
50305 + // don't allow hardlinking of suid/sgid files without permission
50306 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50307 + needmode |= GR_SETID;
50308 +
50309 + if ((newmode & needmode) != needmode)
50310 + goto bad;
50311 +
50312 + // enforce minimum permissions
50313 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50314 + return newmode;
50315 +bad:
50316 + needmode = oldmode;
50317 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50318 + needmode |= GR_SETID;
50319 +
50320 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50321 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50322 + return (GR_CREATE | GR_LINK);
50323 + } else if (newmode & GR_SUPPRESS)
50324 + return GR_SUPPRESS;
50325 + else
50326 + return 0;
50327 +}
50328 +
50329 +int
50330 +gr_check_hidden_task(const struct task_struct *task)
50331 +{
50332 + if (unlikely(!(gr_status & GR_READY)))
50333 + return 0;
50334 +
50335 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50336 + return 1;
50337 +
50338 + return 0;
50339 +}
50340 +
50341 +int
50342 +gr_check_protected_task(const struct task_struct *task)
50343 +{
50344 + if (unlikely(!(gr_status & GR_READY) || !task))
50345 + return 0;
50346 +
50347 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50348 + task->acl != current->acl)
50349 + return 1;
50350 +
50351 + return 0;
50352 +}
50353 +
50354 +int
50355 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50356 +{
50357 + struct task_struct *p;
50358 + int ret = 0;
50359 +
50360 + if (unlikely(!(gr_status & GR_READY) || !pid))
50361 + return ret;
50362 +
50363 + read_lock(&tasklist_lock);
50364 + do_each_pid_task(pid, type, p) {
50365 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50366 + p->acl != current->acl) {
50367 + ret = 1;
50368 + goto out;
50369 + }
50370 + } while_each_pid_task(pid, type, p);
50371 +out:
50372 + read_unlock(&tasklist_lock);
50373 +
50374 + return ret;
50375 +}
50376 +
50377 +void
50378 +gr_copy_label(struct task_struct *tsk)
50379 +{
50380 + tsk->signal->used_accept = 0;
50381 + tsk->acl_sp_role = 0;
50382 + tsk->acl_role_id = current->acl_role_id;
50383 + tsk->acl = current->acl;
50384 + tsk->role = current->role;
50385 + tsk->signal->curr_ip = current->signal->curr_ip;
50386 + tsk->signal->saved_ip = current->signal->saved_ip;
50387 + if (current->exec_file)
50388 + get_file(current->exec_file);
50389 + tsk->exec_file = current->exec_file;
50390 + tsk->is_writable = current->is_writable;
50391 + if (unlikely(current->signal->used_accept)) {
50392 + current->signal->curr_ip = 0;
50393 + current->signal->saved_ip = 0;
50394 + }
50395 +
50396 + return;
50397 +}
50398 +
50399 +static void
50400 +gr_set_proc_res(struct task_struct *task)
50401 +{
50402 + struct acl_subject_label *proc;
50403 + unsigned short i;
50404 +
50405 + proc = task->acl;
50406 +
50407 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50408 + return;
50409 +
50410 + for (i = 0; i < RLIM_NLIMITS; i++) {
50411 + if (!(proc->resmask & (1 << i)))
50412 + continue;
50413 +
50414 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50415 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50416 + }
50417 +
50418 + return;
50419 +}
50420 +
50421 +extern int __gr_process_user_ban(struct user_struct *user);
50422 +
50423 +int
50424 +gr_check_user_change(int real, int effective, int fs)
50425 +{
50426 + unsigned int i;
50427 + __u16 num;
50428 + uid_t *uidlist;
50429 + int curuid;
50430 + int realok = 0;
50431 + int effectiveok = 0;
50432 + int fsok = 0;
50433 +
50434 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50435 + struct user_struct *user;
50436 +
50437 + if (real == -1)
50438 + goto skipit;
50439 +
50440 + user = find_user(real);
50441 + if (user == NULL)
50442 + goto skipit;
50443 +
50444 + if (__gr_process_user_ban(user)) {
50445 + /* for find_user */
50446 + free_uid(user);
50447 + return 1;
50448 + }
50449 +
50450 + /* for find_user */
50451 + free_uid(user);
50452 +
50453 +skipit:
50454 +#endif
50455 +
50456 + if (unlikely(!(gr_status & GR_READY)))
50457 + return 0;
50458 +
50459 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50460 + gr_log_learn_id_change('u', real, effective, fs);
50461 +
50462 + num = current->acl->user_trans_num;
50463 + uidlist = current->acl->user_transitions;
50464 +
50465 + if (uidlist == NULL)
50466 + return 0;
50467 +
50468 + if (real == -1)
50469 + realok = 1;
50470 + if (effective == -1)
50471 + effectiveok = 1;
50472 + if (fs == -1)
50473 + fsok = 1;
50474 +
50475 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50476 + for (i = 0; i < num; i++) {
50477 + curuid = (int)uidlist[i];
50478 + if (real == curuid)
50479 + realok = 1;
50480 + if (effective == curuid)
50481 + effectiveok = 1;
50482 + if (fs == curuid)
50483 + fsok = 1;
50484 + }
50485 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50486 + for (i = 0; i < num; i++) {
50487 + curuid = (int)uidlist[i];
50488 + if (real == curuid)
50489 + break;
50490 + if (effective == curuid)
50491 + break;
50492 + if (fs == curuid)
50493 + break;
50494 + }
50495 + /* not in deny list */
50496 + if (i == num) {
50497 + realok = 1;
50498 + effectiveok = 1;
50499 + fsok = 1;
50500 + }
50501 + }
50502 +
50503 + if (realok && effectiveok && fsok)
50504 + return 0;
50505 + else {
50506 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50507 + return 1;
50508 + }
50509 +}
50510 +
50511 +int
50512 +gr_check_group_change(int real, int effective, int fs)
50513 +{
50514 + unsigned int i;
50515 + __u16 num;
50516 + gid_t *gidlist;
50517 + int curgid;
50518 + int realok = 0;
50519 + int effectiveok = 0;
50520 + int fsok = 0;
50521 +
50522 + if (unlikely(!(gr_status & GR_READY)))
50523 + return 0;
50524 +
50525 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50526 + gr_log_learn_id_change('g', real, effective, fs);
50527 +
50528 + num = current->acl->group_trans_num;
50529 + gidlist = current->acl->group_transitions;
50530 +
50531 + if (gidlist == NULL)
50532 + return 0;
50533 +
50534 + if (real == -1)
50535 + realok = 1;
50536 + if (effective == -1)
50537 + effectiveok = 1;
50538 + if (fs == -1)
50539 + fsok = 1;
50540 +
50541 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50542 + for (i = 0; i < num; i++) {
50543 + curgid = (int)gidlist[i];
50544 + if (real == curgid)
50545 + realok = 1;
50546 + if (effective == curgid)
50547 + effectiveok = 1;
50548 + if (fs == curgid)
50549 + fsok = 1;
50550 + }
50551 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50552 + for (i = 0; i < num; i++) {
50553 + curgid = (int)gidlist[i];
50554 + if (real == curgid)
50555 + break;
50556 + if (effective == curgid)
50557 + break;
50558 + if (fs == curgid)
50559 + break;
50560 + }
50561 + /* not in deny list */
50562 + if (i == num) {
50563 + realok = 1;
50564 + effectiveok = 1;
50565 + fsok = 1;
50566 + }
50567 + }
50568 +
50569 + if (realok && effectiveok && fsok)
50570 + return 0;
50571 + else {
50572 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50573 + return 1;
50574 + }
50575 +}
50576 +
50577 +void
50578 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50579 +{
50580 + struct acl_role_label *role = task->role;
50581 + struct acl_subject_label *subj = NULL;
50582 + struct acl_object_label *obj;
50583 + struct file *filp;
50584 +
50585 + if (unlikely(!(gr_status & GR_READY)))
50586 + return;
50587 +
50588 + filp = task->exec_file;
50589 +
50590 + /* kernel process, we'll give them the kernel role */
50591 + if (unlikely(!filp)) {
50592 + task->role = kernel_role;
50593 + task->acl = kernel_role->root_label;
50594 + return;
50595 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50596 + role = lookup_acl_role_label(task, uid, gid);
50597 +
50598 + /* perform subject lookup in possibly new role
50599 + we can use this result below in the case where role == task->role
50600 + */
50601 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50602 +
50603 + /* if we changed uid/gid, but result in the same role
50604 + and are using inheritance, don't lose the inherited subject
50605 + if current subject is other than what normal lookup
50606 + would result in, we arrived via inheritance, don't
50607 + lose subject
50608 + */
50609 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50610 + (subj == task->acl)))
50611 + task->acl = subj;
50612 +
50613 + task->role = role;
50614 +
50615 + task->is_writable = 0;
50616 +
50617 + /* ignore additional mmap checks for processes that are writable
50618 + by the default ACL */
50619 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50620 + if (unlikely(obj->mode & GR_WRITE))
50621 + task->is_writable = 1;
50622 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50623 + if (unlikely(obj->mode & GR_WRITE))
50624 + task->is_writable = 1;
50625 +
50626 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50627 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50628 +#endif
50629 +
50630 + gr_set_proc_res(task);
50631 +
50632 + return;
50633 +}
50634 +
50635 +int
50636 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50637 + const int unsafe_flags)
50638 +{
50639 + struct task_struct *task = current;
50640 + struct acl_subject_label *newacl;
50641 + struct acl_object_label *obj;
50642 + __u32 retmode;
50643 +
50644 + if (unlikely(!(gr_status & GR_READY)))
50645 + return 0;
50646 +
50647 + newacl = chk_subj_label(dentry, mnt, task->role);
50648 +
50649 + task_lock(task);
50650 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50651 + !(task->role->roletype & GR_ROLE_GOD) &&
50652 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50653 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50654 + task_unlock(task);
50655 + if (unsafe_flags & LSM_UNSAFE_SHARE)
50656 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50657 + else
50658 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50659 + return -EACCES;
50660 + }
50661 + task_unlock(task);
50662 +
50663 + obj = chk_obj_label(dentry, mnt, task->acl);
50664 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50665 +
50666 + if (!(task->acl->mode & GR_INHERITLEARN) &&
50667 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50668 + if (obj->nested)
50669 + task->acl = obj->nested;
50670 + else
50671 + task->acl = newacl;
50672 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50673 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50674 +
50675 + task->is_writable = 0;
50676 +
50677 + /* ignore additional mmap checks for processes that are writable
50678 + by the default ACL */
50679 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
50680 + if (unlikely(obj->mode & GR_WRITE))
50681 + task->is_writable = 1;
50682 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
50683 + if (unlikely(obj->mode & GR_WRITE))
50684 + task->is_writable = 1;
50685 +
50686 + gr_set_proc_res(task);
50687 +
50688 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50689 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50690 +#endif
50691 + return 0;
50692 +}
50693 +
50694 +/* always called with valid inodev ptr */
50695 +static void
50696 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50697 +{
50698 + struct acl_object_label *matchpo;
50699 + struct acl_subject_label *matchps;
50700 + struct acl_subject_label *subj;
50701 + struct acl_role_label *role;
50702 + unsigned int x;
50703 +
50704 + FOR_EACH_ROLE_START(role)
50705 + FOR_EACH_SUBJECT_START(role, subj, x)
50706 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50707 + matchpo->mode |= GR_DELETED;
50708 + FOR_EACH_SUBJECT_END(subj,x)
50709 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50710 + if (subj->inode == ino && subj->device == dev)
50711 + subj->mode |= GR_DELETED;
50712 + FOR_EACH_NESTED_SUBJECT_END(subj)
50713 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50714 + matchps->mode |= GR_DELETED;
50715 + FOR_EACH_ROLE_END(role)
50716 +
50717 + inodev->nentry->deleted = 1;
50718 +
50719 + return;
50720 +}
50721 +
50722 +void
50723 +gr_handle_delete(const ino_t ino, const dev_t dev)
50724 +{
50725 + struct inodev_entry *inodev;
50726 +
50727 + if (unlikely(!(gr_status & GR_READY)))
50728 + return;
50729 +
50730 + write_lock(&gr_inode_lock);
50731 + inodev = lookup_inodev_entry(ino, dev);
50732 + if (inodev != NULL)
50733 + do_handle_delete(inodev, ino, dev);
50734 + write_unlock(&gr_inode_lock);
50735 +
50736 + return;
50737 +}
50738 +
50739 +static void
50740 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50741 + const ino_t newinode, const dev_t newdevice,
50742 + struct acl_subject_label *subj)
50743 +{
50744 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50745 + struct acl_object_label *match;
50746 +
50747 + match = subj->obj_hash[index];
50748 +
50749 + while (match && (match->inode != oldinode ||
50750 + match->device != olddevice ||
50751 + !(match->mode & GR_DELETED)))
50752 + match = match->next;
50753 +
50754 + if (match && (match->inode == oldinode)
50755 + && (match->device == olddevice)
50756 + && (match->mode & GR_DELETED)) {
50757 + if (match->prev == NULL) {
50758 + subj->obj_hash[index] = match->next;
50759 + if (match->next != NULL)
50760 + match->next->prev = NULL;
50761 + } else {
50762 + match->prev->next = match->next;
50763 + if (match->next != NULL)
50764 + match->next->prev = match->prev;
50765 + }
50766 + match->prev = NULL;
50767 + match->next = NULL;
50768 + match->inode = newinode;
50769 + match->device = newdevice;
50770 + match->mode &= ~GR_DELETED;
50771 +
50772 + insert_acl_obj_label(match, subj);
50773 + }
50774 +
50775 + return;
50776 +}
50777 +
50778 +static void
50779 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50780 + const ino_t newinode, const dev_t newdevice,
50781 + struct acl_role_label *role)
50782 +{
50783 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50784 + struct acl_subject_label *match;
50785 +
50786 + match = role->subj_hash[index];
50787 +
50788 + while (match && (match->inode != oldinode ||
50789 + match->device != olddevice ||
50790 + !(match->mode & GR_DELETED)))
50791 + match = match->next;
50792 +
50793 + if (match && (match->inode == oldinode)
50794 + && (match->device == olddevice)
50795 + && (match->mode & GR_DELETED)) {
50796 + if (match->prev == NULL) {
50797 + role->subj_hash[index] = match->next;
50798 + if (match->next != NULL)
50799 + match->next->prev = NULL;
50800 + } else {
50801 + match->prev->next = match->next;
50802 + if (match->next != NULL)
50803 + match->next->prev = match->prev;
50804 + }
50805 + match->prev = NULL;
50806 + match->next = NULL;
50807 + match->inode = newinode;
50808 + match->device = newdevice;
50809 + match->mode &= ~GR_DELETED;
50810 +
50811 + insert_acl_subj_label(match, role);
50812 + }
50813 +
50814 + return;
50815 +}
50816 +
50817 +static void
50818 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50819 + const ino_t newinode, const dev_t newdevice)
50820 +{
50821 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50822 + struct inodev_entry *match;
50823 +
50824 + match = inodev_set.i_hash[index];
50825 +
50826 + while (match && (match->nentry->inode != oldinode ||
50827 + match->nentry->device != olddevice || !match->nentry->deleted))
50828 + match = match->next;
50829 +
50830 + if (match && (match->nentry->inode == oldinode)
50831 + && (match->nentry->device == olddevice) &&
50832 + match->nentry->deleted) {
50833 + if (match->prev == NULL) {
50834 + inodev_set.i_hash[index] = match->next;
50835 + if (match->next != NULL)
50836 + match->next->prev = NULL;
50837 + } else {
50838 + match->prev->next = match->next;
50839 + if (match->next != NULL)
50840 + match->next->prev = match->prev;
50841 + }
50842 + match->prev = NULL;
50843 + match->next = NULL;
50844 + match->nentry->inode = newinode;
50845 + match->nentry->device = newdevice;
50846 + match->nentry->deleted = 0;
50847 +
50848 + insert_inodev_entry(match);
50849 + }
50850 +
50851 + return;
50852 +}
50853 +
50854 +static void
50855 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50856 +{
50857 + struct acl_subject_label *subj;
50858 + struct acl_role_label *role;
50859 + unsigned int x;
50860 +
50861 + FOR_EACH_ROLE_START(role)
50862 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50863 +
50864 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50865 + if ((subj->inode == ino) && (subj->device == dev)) {
50866 + subj->inode = ino;
50867 + subj->device = dev;
50868 + }
50869 + FOR_EACH_NESTED_SUBJECT_END(subj)
50870 + FOR_EACH_SUBJECT_START(role, subj, x)
50871 + update_acl_obj_label(matchn->inode, matchn->device,
50872 + ino, dev, subj);
50873 + FOR_EACH_SUBJECT_END(subj,x)
50874 + FOR_EACH_ROLE_END(role)
50875 +
50876 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50877 +
50878 + return;
50879 +}
50880 +
50881 +static void
50882 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50883 + const struct vfsmount *mnt)
50884 +{
50885 + ino_t ino = dentry->d_inode->i_ino;
50886 + dev_t dev = __get_dev(dentry);
50887 +
50888 + __do_handle_create(matchn, ino, dev);
50889 +
50890 + return;
50891 +}
50892 +
50893 +void
50894 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50895 +{
50896 + struct name_entry *matchn;
50897 +
50898 + if (unlikely(!(gr_status & GR_READY)))
50899 + return;
50900 +
50901 + preempt_disable();
50902 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50903 +
50904 + if (unlikely((unsigned long)matchn)) {
50905 + write_lock(&gr_inode_lock);
50906 + do_handle_create(matchn, dentry, mnt);
50907 + write_unlock(&gr_inode_lock);
50908 + }
50909 + preempt_enable();
50910 +
50911 + return;
50912 +}
50913 +
50914 +void
50915 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50916 +{
50917 + struct name_entry *matchn;
50918 +
50919 + if (unlikely(!(gr_status & GR_READY)))
50920 + return;
50921 +
50922 + preempt_disable();
50923 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50924 +
50925 + if (unlikely((unsigned long)matchn)) {
50926 + write_lock(&gr_inode_lock);
50927 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50928 + write_unlock(&gr_inode_lock);
50929 + }
50930 + preempt_enable();
50931 +
50932 + return;
50933 +}
50934 +
50935 +void
50936 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50937 + struct dentry *old_dentry,
50938 + struct dentry *new_dentry,
50939 + struct vfsmount *mnt, const __u8 replace)
50940 +{
50941 + struct name_entry *matchn;
50942 + struct inodev_entry *inodev;
50943 + struct inode *inode = new_dentry->d_inode;
50944 + ino_t old_ino = old_dentry->d_inode->i_ino;
50945 + dev_t old_dev = __get_dev(old_dentry);
50946 +
50947 + /* vfs_rename swaps the name and parent link for old_dentry and
50948 + new_dentry
50949 + at this point, old_dentry has the new name, parent link, and inode
50950 + for the renamed file
50951 + if a file is being replaced by a rename, new_dentry has the inode
50952 + and name for the replaced file
50953 + */
50954 +
50955 + if (unlikely(!(gr_status & GR_READY)))
50956 + return;
50957 +
50958 + preempt_disable();
50959 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50960 +
50961 + /* we wouldn't have to check d_inode if it weren't for
50962 + NFS silly-renaming
50963 + */
50964 +
50965 + write_lock(&gr_inode_lock);
50966 + if (unlikely(replace && inode)) {
50967 + ino_t new_ino = inode->i_ino;
50968 + dev_t new_dev = __get_dev(new_dentry);
50969 +
50970 + inodev = lookup_inodev_entry(new_ino, new_dev);
50971 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50972 + do_handle_delete(inodev, new_ino, new_dev);
50973 + }
50974 +
50975 + inodev = lookup_inodev_entry(old_ino, old_dev);
50976 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50977 + do_handle_delete(inodev, old_ino, old_dev);
50978 +
50979 + if (unlikely((unsigned long)matchn))
50980 + do_handle_create(matchn, old_dentry, mnt);
50981 +
50982 + write_unlock(&gr_inode_lock);
50983 + preempt_enable();
50984 +
50985 + return;
50986 +}
50987 +
50988 +static int
50989 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50990 + unsigned char **sum)
50991 +{
50992 + struct acl_role_label *r;
50993 + struct role_allowed_ip *ipp;
50994 + struct role_transition *trans;
50995 + unsigned int i;
50996 + int found = 0;
50997 + u32 curr_ip = current->signal->curr_ip;
50998 +
50999 + current->signal->saved_ip = curr_ip;
51000 +
51001 + /* check transition table */
51002 +
51003 + for (trans = current->role->transitions; trans; trans = trans->next) {
51004 + if (!strcmp(rolename, trans->rolename)) {
51005 + found = 1;
51006 + break;
51007 + }
51008 + }
51009 +
51010 + if (!found)
51011 + return 0;
51012 +
51013 + /* handle special roles that do not require authentication
51014 + and check ip */
51015 +
51016 + FOR_EACH_ROLE_START(r)
51017 + if (!strcmp(rolename, r->rolename) &&
51018 + (r->roletype & GR_ROLE_SPECIAL)) {
51019 + found = 0;
51020 + if (r->allowed_ips != NULL) {
51021 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51022 + if ((ntohl(curr_ip) & ipp->netmask) ==
51023 + (ntohl(ipp->addr) & ipp->netmask))
51024 + found = 1;
51025 + }
51026 + } else
51027 + found = 2;
51028 + if (!found)
51029 + return 0;
51030 +
51031 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51032 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51033 + *salt = NULL;
51034 + *sum = NULL;
51035 + return 1;
51036 + }
51037 + }
51038 + FOR_EACH_ROLE_END(r)
51039 +
51040 + for (i = 0; i < num_sprole_pws; i++) {
51041 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51042 + *salt = acl_special_roles[i]->salt;
51043 + *sum = acl_special_roles[i]->sum;
51044 + return 1;
51045 + }
51046 + }
51047 +
51048 + return 0;
51049 +}
51050 +
51051 +static void
51052 +assign_special_role(char *rolename)
51053 +{
51054 + struct acl_object_label *obj;
51055 + struct acl_role_label *r;
51056 + struct acl_role_label *assigned = NULL;
51057 + struct task_struct *tsk;
51058 + struct file *filp;
51059 +
51060 + FOR_EACH_ROLE_START(r)
51061 + if (!strcmp(rolename, r->rolename) &&
51062 + (r->roletype & GR_ROLE_SPECIAL)) {
51063 + assigned = r;
51064 + break;
51065 + }
51066 + FOR_EACH_ROLE_END(r)
51067 +
51068 + if (!assigned)
51069 + return;
51070 +
51071 + read_lock(&tasklist_lock);
51072 + read_lock(&grsec_exec_file_lock);
51073 +
51074 + tsk = current->real_parent;
51075 + if (tsk == NULL)
51076 + goto out_unlock;
51077 +
51078 + filp = tsk->exec_file;
51079 + if (filp == NULL)
51080 + goto out_unlock;
51081 +
51082 + tsk->is_writable = 0;
51083 +
51084 + tsk->acl_sp_role = 1;
51085 + tsk->acl_role_id = ++acl_sp_role_value;
51086 + tsk->role = assigned;
51087 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51088 +
51089 + /* ignore additional mmap checks for processes that are writable
51090 + by the default ACL */
51091 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51092 + if (unlikely(obj->mode & GR_WRITE))
51093 + tsk->is_writable = 1;
51094 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51095 + if (unlikely(obj->mode & GR_WRITE))
51096 + tsk->is_writable = 1;
51097 +
51098 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51099 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51100 +#endif
51101 +
51102 +out_unlock:
51103 + read_unlock(&grsec_exec_file_lock);
51104 + read_unlock(&tasklist_lock);
51105 + return;
51106 +}
51107 +
51108 +int gr_check_secure_terminal(struct task_struct *task)
51109 +{
51110 + struct task_struct *p, *p2, *p3;
51111 + struct files_struct *files;
51112 + struct fdtable *fdt;
51113 + struct file *our_file = NULL, *file;
51114 + int i;
51115 +
51116 + if (task->signal->tty == NULL)
51117 + return 1;
51118 +
51119 + files = get_files_struct(task);
51120 + if (files != NULL) {
51121 + rcu_read_lock();
51122 + fdt = files_fdtable(files);
51123 + for (i=0; i < fdt->max_fds; i++) {
51124 + file = fcheck_files(files, i);
51125 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51126 + get_file(file);
51127 + our_file = file;
51128 + }
51129 + }
51130 + rcu_read_unlock();
51131 + put_files_struct(files);
51132 + }
51133 +
51134 + if (our_file == NULL)
51135 + return 1;
51136 +
51137 + read_lock(&tasklist_lock);
51138 + do_each_thread(p2, p) {
51139 + files = get_files_struct(p);
51140 + if (files == NULL ||
51141 + (p->signal && p->signal->tty == task->signal->tty)) {
51142 + if (files != NULL)
51143 + put_files_struct(files);
51144 + continue;
51145 + }
51146 + rcu_read_lock();
51147 + fdt = files_fdtable(files);
51148 + for (i=0; i < fdt->max_fds; i++) {
51149 + file = fcheck_files(files, i);
51150 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51151 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51152 + p3 = task;
51153 + while (p3->pid > 0) {
51154 + if (p3 == p)
51155 + break;
51156 + p3 = p3->real_parent;
51157 + }
51158 + if (p3 == p)
51159 + break;
51160 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51161 + gr_handle_alertkill(p);
51162 + rcu_read_unlock();
51163 + put_files_struct(files);
51164 + read_unlock(&tasklist_lock);
51165 + fput(our_file);
51166 + return 0;
51167 + }
51168 + }
51169 + rcu_read_unlock();
51170 + put_files_struct(files);
51171 + } while_each_thread(p2, p);
51172 + read_unlock(&tasklist_lock);
51173 +
51174 + fput(our_file);
51175 + return 1;
51176 +}
51177 +
51178 +ssize_t
51179 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51180 +{
51181 + struct gr_arg_wrapper uwrap;
51182 + unsigned char *sprole_salt = NULL;
51183 + unsigned char *sprole_sum = NULL;
51184 + int error = sizeof (struct gr_arg_wrapper);
51185 + int error2 = 0;
51186 +
51187 + mutex_lock(&gr_dev_mutex);
51188 +
51189 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51190 + error = -EPERM;
51191 + goto out;
51192 + }
51193 +
51194 + if (count != sizeof (struct gr_arg_wrapper)) {
51195 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51196 + error = -EINVAL;
51197 + goto out;
51198 + }
51199 +
51200 +
51201 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51202 + gr_auth_expires = 0;
51203 + gr_auth_attempts = 0;
51204 + }
51205 +
51206 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51207 + error = -EFAULT;
51208 + goto out;
51209 + }
51210 +
51211 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51212 + error = -EINVAL;
51213 + goto out;
51214 + }
51215 +
51216 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51217 + error = -EFAULT;
51218 + goto out;
51219 + }
51220 +
51221 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51222 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51223 + time_after(gr_auth_expires, get_seconds())) {
51224 + error = -EBUSY;
51225 + goto out;
51226 + }
51227 +
51228 + /* if non-root trying to do anything other than use a special role,
51229 + do not attempt authentication, do not count towards authentication
51230 + locking
51231 + */
51232 +
51233 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51234 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51235 + current_uid()) {
51236 + error = -EPERM;
51237 + goto out;
51238 + }
51239 +
51240 + /* ensure pw and special role name are null terminated */
51241 +
51242 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51243 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51244 +
51245 + /* Okay.
51246 + * We have our enough of the argument structure..(we have yet
51247 + * to copy_from_user the tables themselves) . Copy the tables
51248 + * only if we need them, i.e. for loading operations. */
51249 +
51250 + switch (gr_usermode->mode) {
51251 + case GR_STATUS:
51252 + if (gr_status & GR_READY) {
51253 + error = 1;
51254 + if (!gr_check_secure_terminal(current))
51255 + error = 3;
51256 + } else
51257 + error = 2;
51258 + goto out;
51259 + case GR_SHUTDOWN:
51260 + if ((gr_status & GR_READY)
51261 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51262 + pax_open_kernel();
51263 + gr_status &= ~GR_READY;
51264 + pax_close_kernel();
51265 +
51266 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51267 + free_variables();
51268 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51269 + memset(gr_system_salt, 0, GR_SALT_LEN);
51270 + memset(gr_system_sum, 0, GR_SHA_LEN);
51271 + } else if (gr_status & GR_READY) {
51272 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51273 + error = -EPERM;
51274 + } else {
51275 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51276 + error = -EAGAIN;
51277 + }
51278 + break;
51279 + case GR_ENABLE:
51280 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51281 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51282 + else {
51283 + if (gr_status & GR_READY)
51284 + error = -EAGAIN;
51285 + else
51286 + error = error2;
51287 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51288 + }
51289 + break;
51290 + case GR_RELOAD:
51291 + if (!(gr_status & GR_READY)) {
51292 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51293 + error = -EAGAIN;
51294 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51295 + preempt_disable();
51296 +
51297 + pax_open_kernel();
51298 + gr_status &= ~GR_READY;
51299 + pax_close_kernel();
51300 +
51301 + free_variables();
51302 + if (!(error2 = gracl_init(gr_usermode))) {
51303 + preempt_enable();
51304 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51305 + } else {
51306 + preempt_enable();
51307 + error = error2;
51308 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51309 + }
51310 + } else {
51311 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51312 + error = -EPERM;
51313 + }
51314 + break;
51315 + case GR_SEGVMOD:
51316 + if (unlikely(!(gr_status & GR_READY))) {
51317 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51318 + error = -EAGAIN;
51319 + break;
51320 + }
51321 +
51322 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51323 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51324 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51325 + struct acl_subject_label *segvacl;
51326 + segvacl =
51327 + lookup_acl_subj_label(gr_usermode->segv_inode,
51328 + gr_usermode->segv_device,
51329 + current->role);
51330 + if (segvacl) {
51331 + segvacl->crashes = 0;
51332 + segvacl->expires = 0;
51333 + }
51334 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51335 + gr_remove_uid(gr_usermode->segv_uid);
51336 + }
51337 + } else {
51338 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51339 + error = -EPERM;
51340 + }
51341 + break;
51342 + case GR_SPROLE:
51343 + case GR_SPROLEPAM:
51344 + if (unlikely(!(gr_status & GR_READY))) {
51345 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51346 + error = -EAGAIN;
51347 + break;
51348 + }
51349 +
51350 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51351 + current->role->expires = 0;
51352 + current->role->auth_attempts = 0;
51353 + }
51354 +
51355 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51356 + time_after(current->role->expires, get_seconds())) {
51357 + error = -EBUSY;
51358 + goto out;
51359 + }
51360 +
51361 + if (lookup_special_role_auth
51362 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51363 + && ((!sprole_salt && !sprole_sum)
51364 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51365 + char *p = "";
51366 + assign_special_role(gr_usermode->sp_role);
51367 + read_lock(&tasklist_lock);
51368 + if (current->real_parent)
51369 + p = current->real_parent->role->rolename;
51370 + read_unlock(&tasklist_lock);
51371 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51372 + p, acl_sp_role_value);
51373 + } else {
51374 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51375 + error = -EPERM;
51376 + if(!(current->role->auth_attempts++))
51377 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51378 +
51379 + goto out;
51380 + }
51381 + break;
51382 + case GR_UNSPROLE:
51383 + if (unlikely(!(gr_status & GR_READY))) {
51384 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51385 + error = -EAGAIN;
51386 + break;
51387 + }
51388 +
51389 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51390 + char *p = "";
51391 + int i = 0;
51392 +
51393 + read_lock(&tasklist_lock);
51394 + if (current->real_parent) {
51395 + p = current->real_parent->role->rolename;
51396 + i = current->real_parent->acl_role_id;
51397 + }
51398 + read_unlock(&tasklist_lock);
51399 +
51400 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51401 + gr_set_acls(1);
51402 + } else {
51403 + error = -EPERM;
51404 + goto out;
51405 + }
51406 + break;
51407 + default:
51408 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51409 + error = -EINVAL;
51410 + break;
51411 + }
51412 +
51413 + if (error != -EPERM)
51414 + goto out;
51415 +
51416 + if(!(gr_auth_attempts++))
51417 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51418 +
51419 + out:
51420 + mutex_unlock(&gr_dev_mutex);
51421 + return error;
51422 +}
51423 +
51424 +/* must be called with
51425 + rcu_read_lock();
51426 + read_lock(&tasklist_lock);
51427 + read_lock(&grsec_exec_file_lock);
51428 +*/
51429 +int gr_apply_subject_to_task(struct task_struct *task)
51430 +{
51431 + struct acl_object_label *obj;
51432 + char *tmpname;
51433 + struct acl_subject_label *tmpsubj;
51434 + struct file *filp;
51435 + struct name_entry *nmatch;
51436 +
51437 + filp = task->exec_file;
51438 + if (filp == NULL)
51439 + return 0;
51440 +
51441 + /* the following is to apply the correct subject
51442 + on binaries running when the RBAC system
51443 + is enabled, when the binaries have been
51444 + replaced or deleted since their execution
51445 + -----
51446 + when the RBAC system starts, the inode/dev
51447 + from exec_file will be one the RBAC system
51448 + is unaware of. It only knows the inode/dev
51449 + of the present file on disk, or the absence
51450 + of it.
51451 + */
51452 + preempt_disable();
51453 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51454 +
51455 + nmatch = lookup_name_entry(tmpname);
51456 + preempt_enable();
51457 + tmpsubj = NULL;
51458 + if (nmatch) {
51459 + if (nmatch->deleted)
51460 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51461 + else
51462 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51463 + if (tmpsubj != NULL)
51464 + task->acl = tmpsubj;
51465 + }
51466 + if (tmpsubj == NULL)
51467 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51468 + task->role);
51469 + if (task->acl) {
51470 + task->is_writable = 0;
51471 + /* ignore additional mmap checks for processes that are writable
51472 + by the default ACL */
51473 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51474 + if (unlikely(obj->mode & GR_WRITE))
51475 + task->is_writable = 1;
51476 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51477 + if (unlikely(obj->mode & GR_WRITE))
51478 + task->is_writable = 1;
51479 +
51480 + gr_set_proc_res(task);
51481 +
51482 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51483 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51484 +#endif
51485 + } else {
51486 + return 1;
51487 + }
51488 +
51489 + return 0;
51490 +}
51491 +
51492 +int
51493 +gr_set_acls(const int type)
51494 +{
51495 + struct task_struct *task, *task2;
51496 + struct acl_role_label *role = current->role;
51497 + __u16 acl_role_id = current->acl_role_id;
51498 + const struct cred *cred;
51499 + int ret;
51500 +
51501 + rcu_read_lock();
51502 + read_lock(&tasklist_lock);
51503 + read_lock(&grsec_exec_file_lock);
51504 + do_each_thread(task2, task) {
51505 + /* check to see if we're called from the exit handler,
51506 + if so, only replace ACLs that have inherited the admin
51507 + ACL */
51508 +
51509 + if (type && (task->role != role ||
51510 + task->acl_role_id != acl_role_id))
51511 + continue;
51512 +
51513 + task->acl_role_id = 0;
51514 + task->acl_sp_role = 0;
51515 +
51516 + if (task->exec_file) {
51517 + cred = __task_cred(task);
51518 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51519 + ret = gr_apply_subject_to_task(task);
51520 + if (ret) {
51521 + read_unlock(&grsec_exec_file_lock);
51522 + read_unlock(&tasklist_lock);
51523 + rcu_read_unlock();
51524 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51525 + return ret;
51526 + }
51527 + } else {
51528 + // it's a kernel process
51529 + task->role = kernel_role;
51530 + task->acl = kernel_role->root_label;
51531 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51532 + task->acl->mode &= ~GR_PROCFIND;
51533 +#endif
51534 + }
51535 + } while_each_thread(task2, task);
51536 + read_unlock(&grsec_exec_file_lock);
51537 + read_unlock(&tasklist_lock);
51538 + rcu_read_unlock();
51539 +
51540 + return 0;
51541 +}
51542 +
51543 +void
51544 +gr_learn_resource(const struct task_struct *task,
51545 + const int res, const unsigned long wanted, const int gt)
51546 +{
51547 + struct acl_subject_label *acl;
51548 + const struct cred *cred;
51549 +
51550 + if (unlikely((gr_status & GR_READY) &&
51551 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51552 + goto skip_reslog;
51553 +
51554 +#ifdef CONFIG_GRKERNSEC_RESLOG
51555 + gr_log_resource(task, res, wanted, gt);
51556 +#endif
51557 + skip_reslog:
51558 +
51559 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51560 + return;
51561 +
51562 + acl = task->acl;
51563 +
51564 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51565 + !(acl->resmask & (1 << (unsigned short) res))))
51566 + return;
51567 +
51568 + if (wanted >= acl->res[res].rlim_cur) {
51569 + unsigned long res_add;
51570 +
51571 + res_add = wanted;
51572 + switch (res) {
51573 + case RLIMIT_CPU:
51574 + res_add += GR_RLIM_CPU_BUMP;
51575 + break;
51576 + case RLIMIT_FSIZE:
51577 + res_add += GR_RLIM_FSIZE_BUMP;
51578 + break;
51579 + case RLIMIT_DATA:
51580 + res_add += GR_RLIM_DATA_BUMP;
51581 + break;
51582 + case RLIMIT_STACK:
51583 + res_add += GR_RLIM_STACK_BUMP;
51584 + break;
51585 + case RLIMIT_CORE:
51586 + res_add += GR_RLIM_CORE_BUMP;
51587 + break;
51588 + case RLIMIT_RSS:
51589 + res_add += GR_RLIM_RSS_BUMP;
51590 + break;
51591 + case RLIMIT_NPROC:
51592 + res_add += GR_RLIM_NPROC_BUMP;
51593 + break;
51594 + case RLIMIT_NOFILE:
51595 + res_add += GR_RLIM_NOFILE_BUMP;
51596 + break;
51597 + case RLIMIT_MEMLOCK:
51598 + res_add += GR_RLIM_MEMLOCK_BUMP;
51599 + break;
51600 + case RLIMIT_AS:
51601 + res_add += GR_RLIM_AS_BUMP;
51602 + break;
51603 + case RLIMIT_LOCKS:
51604 + res_add += GR_RLIM_LOCKS_BUMP;
51605 + break;
51606 + case RLIMIT_SIGPENDING:
51607 + res_add += GR_RLIM_SIGPENDING_BUMP;
51608 + break;
51609 + case RLIMIT_MSGQUEUE:
51610 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51611 + break;
51612 + case RLIMIT_NICE:
51613 + res_add += GR_RLIM_NICE_BUMP;
51614 + break;
51615 + case RLIMIT_RTPRIO:
51616 + res_add += GR_RLIM_RTPRIO_BUMP;
51617 + break;
51618 + case RLIMIT_RTTIME:
51619 + res_add += GR_RLIM_RTTIME_BUMP;
51620 + break;
51621 + }
51622 +
51623 + acl->res[res].rlim_cur = res_add;
51624 +
51625 + if (wanted > acl->res[res].rlim_max)
51626 + acl->res[res].rlim_max = res_add;
51627 +
51628 + /* only log the subject filename, since resource logging is supported for
51629 + single-subject learning only */
51630 + rcu_read_lock();
51631 + cred = __task_cred(task);
51632 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51633 + task->role->roletype, cred->uid, cred->gid, acl->filename,
51634 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51635 + "", (unsigned long) res, &task->signal->saved_ip);
51636 + rcu_read_unlock();
51637 + }
51638 +
51639 + return;
51640 +}
51641 +
51642 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51643 +void
51644 +pax_set_initial_flags(struct linux_binprm *bprm)
51645 +{
51646 + struct task_struct *task = current;
51647 + struct acl_subject_label *proc;
51648 + unsigned long flags;
51649 +
51650 + if (unlikely(!(gr_status & GR_READY)))
51651 + return;
51652 +
51653 + flags = pax_get_flags(task);
51654 +
51655 + proc = task->acl;
51656 +
51657 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51658 + flags &= ~MF_PAX_PAGEEXEC;
51659 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51660 + flags &= ~MF_PAX_SEGMEXEC;
51661 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51662 + flags &= ~MF_PAX_RANDMMAP;
51663 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51664 + flags &= ~MF_PAX_EMUTRAMP;
51665 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51666 + flags &= ~MF_PAX_MPROTECT;
51667 +
51668 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51669 + flags |= MF_PAX_PAGEEXEC;
51670 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51671 + flags |= MF_PAX_SEGMEXEC;
51672 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51673 + flags |= MF_PAX_RANDMMAP;
51674 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51675 + flags |= MF_PAX_EMUTRAMP;
51676 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51677 + flags |= MF_PAX_MPROTECT;
51678 +
51679 + pax_set_flags(task, flags);
51680 +
51681 + return;
51682 +}
51683 +#endif
51684 +
51685 +#ifdef CONFIG_SYSCTL
51686 +/* Eric Biederman likes breaking userland ABI and every inode-based security
51687 + system to save 35kb of memory */
51688 +
51689 +/* we modify the passed in filename, but adjust it back before returning */
51690 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51691 +{
51692 + struct name_entry *nmatch;
51693 + char *p, *lastp = NULL;
51694 + struct acl_object_label *obj = NULL, *tmp;
51695 + struct acl_subject_label *tmpsubj;
51696 + char c = '\0';
51697 +
51698 + read_lock(&gr_inode_lock);
51699 +
51700 + p = name + len - 1;
51701 + do {
51702 + nmatch = lookup_name_entry(name);
51703 + if (lastp != NULL)
51704 + *lastp = c;
51705 +
51706 + if (nmatch == NULL)
51707 + goto next_component;
51708 + tmpsubj = current->acl;
51709 + do {
51710 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51711 + if (obj != NULL) {
51712 + tmp = obj->globbed;
51713 + while (tmp) {
51714 + if (!glob_match(tmp->filename, name)) {
51715 + obj = tmp;
51716 + goto found_obj;
51717 + }
51718 + tmp = tmp->next;
51719 + }
51720 + goto found_obj;
51721 + }
51722 + } while ((tmpsubj = tmpsubj->parent_subject));
51723 +next_component:
51724 + /* end case */
51725 + if (p == name)
51726 + break;
51727 +
51728 + while (*p != '/')
51729 + p--;
51730 + if (p == name)
51731 + lastp = p + 1;
51732 + else {
51733 + lastp = p;
51734 + p--;
51735 + }
51736 + c = *lastp;
51737 + *lastp = '\0';
51738 + } while (1);
51739 +found_obj:
51740 + read_unlock(&gr_inode_lock);
51741 + /* obj returned will always be non-null */
51742 + return obj;
51743 +}
51744 +
51745 +/* returns 0 when allowing, non-zero on error
51746 + op of 0 is used for readdir, so we don't log the names of hidden files
51747 +*/
51748 +__u32
51749 +gr_handle_sysctl(const struct ctl_table *table, const int op)
51750 +{
51751 + struct ctl_table *tmp;
51752 + const char *proc_sys = "/proc/sys";
51753 + char *path;
51754 + struct acl_object_label *obj;
51755 + unsigned short len = 0, pos = 0, depth = 0, i;
51756 + __u32 err = 0;
51757 + __u32 mode = 0;
51758 +
51759 + if (unlikely(!(gr_status & GR_READY)))
51760 + return 0;
51761 +
51762 + /* for now, ignore operations on non-sysctl entries if it's not a
51763 + readdir*/
51764 + if (table->child != NULL && op != 0)
51765 + return 0;
51766 +
51767 + mode |= GR_FIND;
51768 + /* it's only a read if it's an entry, read on dirs is for readdir */
51769 + if (op & MAY_READ)
51770 + mode |= GR_READ;
51771 + if (op & MAY_WRITE)
51772 + mode |= GR_WRITE;
51773 +
51774 + preempt_disable();
51775 +
51776 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51777 +
51778 + /* it's only a read/write if it's an actual entry, not a dir
51779 + (which are opened for readdir)
51780 + */
51781 +
51782 + /* convert the requested sysctl entry into a pathname */
51783 +
51784 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51785 + len += strlen(tmp->procname);
51786 + len++;
51787 + depth++;
51788 + }
51789 +
51790 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51791 + /* deny */
51792 + goto out;
51793 + }
51794 +
51795 + memset(path, 0, PAGE_SIZE);
51796 +
51797 + memcpy(path, proc_sys, strlen(proc_sys));
51798 +
51799 + pos += strlen(proc_sys);
51800 +
51801 + for (; depth > 0; depth--) {
51802 + path[pos] = '/';
51803 + pos++;
51804 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51805 + if (depth == i) {
51806 + memcpy(path + pos, tmp->procname,
51807 + strlen(tmp->procname));
51808 + pos += strlen(tmp->procname);
51809 + }
51810 + i++;
51811 + }
51812 + }
51813 +
51814 + obj = gr_lookup_by_name(path, pos);
51815 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51816 +
51817 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51818 + ((err & mode) != mode))) {
51819 + __u32 new_mode = mode;
51820 +
51821 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51822 +
51823 + err = 0;
51824 + gr_log_learn_sysctl(path, new_mode);
51825 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51826 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51827 + err = -ENOENT;
51828 + } else if (!(err & GR_FIND)) {
51829 + err = -ENOENT;
51830 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51831 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51832 + path, (mode & GR_READ) ? " reading" : "",
51833 + (mode & GR_WRITE) ? " writing" : "");
51834 + err = -EACCES;
51835 + } else if ((err & mode) != mode) {
51836 + err = -EACCES;
51837 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51838 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51839 + path, (mode & GR_READ) ? " reading" : "",
51840 + (mode & GR_WRITE) ? " writing" : "");
51841 + err = 0;
51842 + } else
51843 + err = 0;
51844 +
51845 + out:
51846 + preempt_enable();
51847 +
51848 + return err;
51849 +}
51850 +#endif
51851 +
51852 +int
51853 +gr_handle_proc_ptrace(struct task_struct *task)
51854 +{
51855 + struct file *filp;
51856 + struct task_struct *tmp = task;
51857 + struct task_struct *curtemp = current;
51858 + __u32 retmode;
51859 +
51860 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51861 + if (unlikely(!(gr_status & GR_READY)))
51862 + return 0;
51863 +#endif
51864 +
51865 + read_lock(&tasklist_lock);
51866 + read_lock(&grsec_exec_file_lock);
51867 + filp = task->exec_file;
51868 +
51869 + while (tmp->pid > 0) {
51870 + if (tmp == curtemp)
51871 + break;
51872 + tmp = tmp->real_parent;
51873 + }
51874 +
51875 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51876 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51877 + read_unlock(&grsec_exec_file_lock);
51878 + read_unlock(&tasklist_lock);
51879 + return 1;
51880 + }
51881 +
51882 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51883 + if (!(gr_status & GR_READY)) {
51884 + read_unlock(&grsec_exec_file_lock);
51885 + read_unlock(&tasklist_lock);
51886 + return 0;
51887 + }
51888 +#endif
51889 +
51890 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51891 + read_unlock(&grsec_exec_file_lock);
51892 + read_unlock(&tasklist_lock);
51893 +
51894 + if (retmode & GR_NOPTRACE)
51895 + return 1;
51896 +
51897 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51898 + && (current->acl != task->acl || (current->acl != current->role->root_label
51899 + && current->pid != task->pid)))
51900 + return 1;
51901 +
51902 + return 0;
51903 +}
51904 +
51905 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51906 +{
51907 + if (unlikely(!(gr_status & GR_READY)))
51908 + return;
51909 +
51910 + if (!(current->role->roletype & GR_ROLE_GOD))
51911 + return;
51912 +
51913 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51914 + p->role->rolename, gr_task_roletype_to_char(p),
51915 + p->acl->filename);
51916 +}
51917 +
51918 +int
51919 +gr_handle_ptrace(struct task_struct *task, const long request)
51920 +{
51921 + struct task_struct *tmp = task;
51922 + struct task_struct *curtemp = current;
51923 + __u32 retmode;
51924 +
51925 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51926 + if (unlikely(!(gr_status & GR_READY)))
51927 + return 0;
51928 +#endif
51929 +
51930 + read_lock(&tasklist_lock);
51931 + while (tmp->pid > 0) {
51932 + if (tmp == curtemp)
51933 + break;
51934 + tmp = tmp->real_parent;
51935 + }
51936 +
51937 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51938 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51939 + read_unlock(&tasklist_lock);
51940 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51941 + return 1;
51942 + }
51943 + read_unlock(&tasklist_lock);
51944 +
51945 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51946 + if (!(gr_status & GR_READY))
51947 + return 0;
51948 +#endif
51949 +
51950 + read_lock(&grsec_exec_file_lock);
51951 + if (unlikely(!task->exec_file)) {
51952 + read_unlock(&grsec_exec_file_lock);
51953 + return 0;
51954 + }
51955 +
51956 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51957 + read_unlock(&grsec_exec_file_lock);
51958 +
51959 + if (retmode & GR_NOPTRACE) {
51960 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51961 + return 1;
51962 + }
51963 +
51964 + if (retmode & GR_PTRACERD) {
51965 + switch (request) {
51966 + case PTRACE_SEIZE:
51967 + case PTRACE_POKETEXT:
51968 + case PTRACE_POKEDATA:
51969 + case PTRACE_POKEUSR:
51970 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51971 + case PTRACE_SETREGS:
51972 + case PTRACE_SETFPREGS:
51973 +#endif
51974 +#ifdef CONFIG_X86
51975 + case PTRACE_SETFPXREGS:
51976 +#endif
51977 +#ifdef CONFIG_ALTIVEC
51978 + case PTRACE_SETVRREGS:
51979 +#endif
51980 + return 1;
51981 + default:
51982 + return 0;
51983 + }
51984 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
51985 + !(current->role->roletype & GR_ROLE_GOD) &&
51986 + (current->acl != task->acl)) {
51987 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51988 + return 1;
51989 + }
51990 +
51991 + return 0;
51992 +}
51993 +
51994 +static int is_writable_mmap(const struct file *filp)
51995 +{
51996 + struct task_struct *task = current;
51997 + struct acl_object_label *obj, *obj2;
51998 +
51999 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52000 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52001 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52002 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52003 + task->role->root_label);
52004 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52005 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52006 + return 1;
52007 + }
52008 + }
52009 + return 0;
52010 +}
52011 +
52012 +int
52013 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52014 +{
52015 + __u32 mode;
52016 +
52017 + if (unlikely(!file || !(prot & PROT_EXEC)))
52018 + return 1;
52019 +
52020 + if (is_writable_mmap(file))
52021 + return 0;
52022 +
52023 + mode =
52024 + gr_search_file(file->f_path.dentry,
52025 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52026 + file->f_path.mnt);
52027 +
52028 + if (!gr_tpe_allow(file))
52029 + return 0;
52030 +
52031 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52032 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52033 + return 0;
52034 + } else if (unlikely(!(mode & GR_EXEC))) {
52035 + return 0;
52036 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52037 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52038 + return 1;
52039 + }
52040 +
52041 + return 1;
52042 +}
52043 +
52044 +int
52045 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52046 +{
52047 + __u32 mode;
52048 +
52049 + if (unlikely(!file || !(prot & PROT_EXEC)))
52050 + return 1;
52051 +
52052 + if (is_writable_mmap(file))
52053 + return 0;
52054 +
52055 + mode =
52056 + gr_search_file(file->f_path.dentry,
52057 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52058 + file->f_path.mnt);
52059 +
52060 + if (!gr_tpe_allow(file))
52061 + return 0;
52062 +
52063 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52064 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52065 + return 0;
52066 + } else if (unlikely(!(mode & GR_EXEC))) {
52067 + return 0;
52068 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52069 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52070 + return 1;
52071 + }
52072 +
52073 + return 1;
52074 +}
52075 +
52076 +void
52077 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52078 +{
52079 + unsigned long runtime;
52080 + unsigned long cputime;
52081 + unsigned int wday, cday;
52082 + __u8 whr, chr;
52083 + __u8 wmin, cmin;
52084 + __u8 wsec, csec;
52085 + struct timespec timeval;
52086 +
52087 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52088 + !(task->acl->mode & GR_PROCACCT)))
52089 + return;
52090 +
52091 + do_posix_clock_monotonic_gettime(&timeval);
52092 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52093 + wday = runtime / (3600 * 24);
52094 + runtime -= wday * (3600 * 24);
52095 + whr = runtime / 3600;
52096 + runtime -= whr * 3600;
52097 + wmin = runtime / 60;
52098 + runtime -= wmin * 60;
52099 + wsec = runtime;
52100 +
52101 + cputime = (task->utime + task->stime) / HZ;
52102 + cday = cputime / (3600 * 24);
52103 + cputime -= cday * (3600 * 24);
52104 + chr = cputime / 3600;
52105 + cputime -= chr * 3600;
52106 + cmin = cputime / 60;
52107 + cputime -= cmin * 60;
52108 + csec = cputime;
52109 +
52110 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52111 +
52112 + return;
52113 +}
52114 +
52115 +void gr_set_kernel_label(struct task_struct *task)
52116 +{
52117 + if (gr_status & GR_READY) {
52118 + task->role = kernel_role;
52119 + task->acl = kernel_role->root_label;
52120 + }
52121 + return;
52122 +}
52123 +
52124 +#ifdef CONFIG_TASKSTATS
52125 +int gr_is_taskstats_denied(int pid)
52126 +{
52127 + struct task_struct *task;
52128 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52129 + const struct cred *cred;
52130 +#endif
52131 + int ret = 0;
52132 +
52133 + /* restrict taskstats viewing to un-chrooted root users
52134 + who have the 'view' subject flag if the RBAC system is enabled
52135 + */
52136 +
52137 + rcu_read_lock();
52138 + read_lock(&tasklist_lock);
52139 + task = find_task_by_vpid(pid);
52140 + if (task) {
52141 +#ifdef CONFIG_GRKERNSEC_CHROOT
52142 + if (proc_is_chrooted(task))
52143 + ret = -EACCES;
52144 +#endif
52145 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52146 + cred = __task_cred(task);
52147 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52148 + if (cred->uid != 0)
52149 + ret = -EACCES;
52150 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52151 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52152 + ret = -EACCES;
52153 +#endif
52154 +#endif
52155 + if (gr_status & GR_READY) {
52156 + if (!(task->acl->mode & GR_VIEW))
52157 + ret = -EACCES;
52158 + }
52159 + } else
52160 + ret = -ENOENT;
52161 +
52162 + read_unlock(&tasklist_lock);
52163 + rcu_read_unlock();
52164 +
52165 + return ret;
52166 +}
52167 +#endif
52168 +
52169 +/* AUXV entries are filled via a descendant of search_binary_handler
52170 + after we've already applied the subject for the target
52171 +*/
52172 +int gr_acl_enable_at_secure(void)
52173 +{
52174 + if (unlikely(!(gr_status & GR_READY)))
52175 + return 0;
52176 +
52177 + if (current->acl->mode & GR_ATSECURE)
52178 + return 1;
52179 +
52180 + return 0;
52181 +}
52182 +
52183 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52184 +{
52185 + struct task_struct *task = current;
52186 + struct dentry *dentry = file->f_path.dentry;
52187 + struct vfsmount *mnt = file->f_path.mnt;
52188 + struct acl_object_label *obj, *tmp;
52189 + struct acl_subject_label *subj;
52190 + unsigned int bufsize;
52191 + int is_not_root;
52192 + char *path;
52193 + dev_t dev = __get_dev(dentry);
52194 +
52195 + if (unlikely(!(gr_status & GR_READY)))
52196 + return 1;
52197 +
52198 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52199 + return 1;
52200 +
52201 + /* ignore Eric Biederman */
52202 + if (IS_PRIVATE(dentry->d_inode))
52203 + return 1;
52204 +
52205 + subj = task->acl;
52206 + do {
52207 + obj = lookup_acl_obj_label(ino, dev, subj);
52208 + if (obj != NULL)
52209 + return (obj->mode & GR_FIND) ? 1 : 0;
52210 + } while ((subj = subj->parent_subject));
52211 +
52212 + /* this is purely an optimization since we're looking for an object
52213 + for the directory we're doing a readdir on
52214 + if it's possible for any globbed object to match the entry we're
52215 + filling into the directory, then the object we find here will be
52216 + an anchor point with attached globbed objects
52217 + */
52218 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52219 + if (obj->globbed == NULL)
52220 + return (obj->mode & GR_FIND) ? 1 : 0;
52221 +
52222 + is_not_root = ((obj->filename[0] == '/') &&
52223 + (obj->filename[1] == '\0')) ? 0 : 1;
52224 + bufsize = PAGE_SIZE - namelen - is_not_root;
52225 +
52226 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52227 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52228 + return 1;
52229 +
52230 + preempt_disable();
52231 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52232 + bufsize);
52233 +
52234 + bufsize = strlen(path);
52235 +
52236 + /* if base is "/", don't append an additional slash */
52237 + if (is_not_root)
52238 + *(path + bufsize) = '/';
52239 + memcpy(path + bufsize + is_not_root, name, namelen);
52240 + *(path + bufsize + namelen + is_not_root) = '\0';
52241 +
52242 + tmp = obj->globbed;
52243 + while (tmp) {
52244 + if (!glob_match(tmp->filename, path)) {
52245 + preempt_enable();
52246 + return (tmp->mode & GR_FIND) ? 1 : 0;
52247 + }
52248 + tmp = tmp->next;
52249 + }
52250 + preempt_enable();
52251 + return (obj->mode & GR_FIND) ? 1 : 0;
52252 +}
52253 +
52254 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52255 +EXPORT_SYMBOL(gr_acl_is_enabled);
52256 +#endif
52257 +EXPORT_SYMBOL(gr_learn_resource);
52258 +EXPORT_SYMBOL(gr_set_kernel_label);
52259 +#ifdef CONFIG_SECURITY
52260 +EXPORT_SYMBOL(gr_check_user_change);
52261 +EXPORT_SYMBOL(gr_check_group_change);
52262 +#endif
52263 +
52264 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52265 new file mode 100644
52266 index 0000000..34fefda
52267 --- /dev/null
52268 +++ b/grsecurity/gracl_alloc.c
52269 @@ -0,0 +1,105 @@
52270 +#include <linux/kernel.h>
52271 +#include <linux/mm.h>
52272 +#include <linux/slab.h>
52273 +#include <linux/vmalloc.h>
52274 +#include <linux/gracl.h>
52275 +#include <linux/grsecurity.h>
52276 +
52277 +static unsigned long alloc_stack_next = 1;
52278 +static unsigned long alloc_stack_size = 1;
52279 +static void **alloc_stack;
52280 +
52281 +static __inline__ int
52282 +alloc_pop(void)
52283 +{
52284 + if (alloc_stack_next == 1)
52285 + return 0;
52286 +
52287 + kfree(alloc_stack[alloc_stack_next - 2]);
52288 +
52289 + alloc_stack_next--;
52290 +
52291 + return 1;
52292 +}
52293 +
52294 +static __inline__ int
52295 +alloc_push(void *buf)
52296 +{
52297 + if (alloc_stack_next >= alloc_stack_size)
52298 + return 1;
52299 +
52300 + alloc_stack[alloc_stack_next - 1] = buf;
52301 +
52302 + alloc_stack_next++;
52303 +
52304 + return 0;
52305 +}
52306 +
52307 +void *
52308 +acl_alloc(unsigned long len)
52309 +{
52310 + void *ret = NULL;
52311 +
52312 + if (!len || len > PAGE_SIZE)
52313 + goto out;
52314 +
52315 + ret = kmalloc(len, GFP_KERNEL);
52316 +
52317 + if (ret) {
52318 + if (alloc_push(ret)) {
52319 + kfree(ret);
52320 + ret = NULL;
52321 + }
52322 + }
52323 +
52324 +out:
52325 + return ret;
52326 +}
52327 +
52328 +void *
52329 +acl_alloc_num(unsigned long num, unsigned long len)
52330 +{
52331 + if (!len || (num > (PAGE_SIZE / len)))
52332 + return NULL;
52333 +
52334 + return acl_alloc(num * len);
52335 +}
52336 +
52337 +void
52338 +acl_free_all(void)
52339 +{
52340 + if (gr_acl_is_enabled() || !alloc_stack)
52341 + return;
52342 +
52343 + while (alloc_pop()) ;
52344 +
52345 + if (alloc_stack) {
52346 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52347 + kfree(alloc_stack);
52348 + else
52349 + vfree(alloc_stack);
52350 + }
52351 +
52352 + alloc_stack = NULL;
52353 + alloc_stack_size = 1;
52354 + alloc_stack_next = 1;
52355 +
52356 + return;
52357 +}
52358 +
52359 +int
52360 +acl_alloc_stack_init(unsigned long size)
52361 +{
52362 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52363 + alloc_stack =
52364 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52365 + else
52366 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52367 +
52368 + alloc_stack_size = size;
52369 +
52370 + if (!alloc_stack)
52371 + return 0;
52372 + else
52373 + return 1;
52374 +}
52375 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52376 new file mode 100644
52377 index 0000000..955ddfb
52378 --- /dev/null
52379 +++ b/grsecurity/gracl_cap.c
52380 @@ -0,0 +1,101 @@
52381 +#include <linux/kernel.h>
52382 +#include <linux/module.h>
52383 +#include <linux/sched.h>
52384 +#include <linux/gracl.h>
52385 +#include <linux/grsecurity.h>
52386 +#include <linux/grinternal.h>
52387 +
52388 +extern const char *captab_log[];
52389 +extern int captab_log_entries;
52390 +
52391 +int
52392 +gr_acl_is_capable(const int cap)
52393 +{
52394 + struct task_struct *task = current;
52395 + const struct cred *cred = current_cred();
52396 + struct acl_subject_label *curracl;
52397 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52398 + kernel_cap_t cap_audit = __cap_empty_set;
52399 +
52400 + if (!gr_acl_is_enabled())
52401 + return 1;
52402 +
52403 + curracl = task->acl;
52404 +
52405 + cap_drop = curracl->cap_lower;
52406 + cap_mask = curracl->cap_mask;
52407 + cap_audit = curracl->cap_invert_audit;
52408 +
52409 + while ((curracl = curracl->parent_subject)) {
52410 + /* if the cap isn't specified in the current computed mask but is specified in the
52411 + current level subject, and is lowered in the current level subject, then add
52412 + it to the set of dropped capabilities
52413 + otherwise, add the current level subject's mask to the current computed mask
52414 + */
52415 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52416 + cap_raise(cap_mask, cap);
52417 + if (cap_raised(curracl->cap_lower, cap))
52418 + cap_raise(cap_drop, cap);
52419 + if (cap_raised(curracl->cap_invert_audit, cap))
52420 + cap_raise(cap_audit, cap);
52421 + }
52422 + }
52423 +
52424 + if (!cap_raised(cap_drop, cap)) {
52425 + if (cap_raised(cap_audit, cap))
52426 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52427 + return 1;
52428 + }
52429 +
52430 + curracl = task->acl;
52431 +
52432 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52433 + && cap_raised(cred->cap_effective, cap)) {
52434 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52435 + task->role->roletype, cred->uid,
52436 + cred->gid, task->exec_file ?
52437 + gr_to_filename(task->exec_file->f_path.dentry,
52438 + task->exec_file->f_path.mnt) : curracl->filename,
52439 + curracl->filename, 0UL,
52440 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52441 + return 1;
52442 + }
52443 +
52444 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52445 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52446 + return 0;
52447 +}
52448 +
52449 +int
52450 +gr_acl_is_capable_nolog(const int cap)
52451 +{
52452 + struct acl_subject_label *curracl;
52453 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52454 +
52455 + if (!gr_acl_is_enabled())
52456 + return 1;
52457 +
52458 + curracl = current->acl;
52459 +
52460 + cap_drop = curracl->cap_lower;
52461 + cap_mask = curracl->cap_mask;
52462 +
52463 + while ((curracl = curracl->parent_subject)) {
52464 + /* if the cap isn't specified in the current computed mask but is specified in the
52465 + current level subject, and is lowered in the current level subject, then add
52466 + it to the set of dropped capabilities
52467 + otherwise, add the current level subject's mask to the current computed mask
52468 + */
52469 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52470 + cap_raise(cap_mask, cap);
52471 + if (cap_raised(curracl->cap_lower, cap))
52472 + cap_raise(cap_drop, cap);
52473 + }
52474 + }
52475 +
52476 + if (!cap_raised(cap_drop, cap))
52477 + return 1;
52478 +
52479 + return 0;
52480 +}
52481 +
52482 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52483 new file mode 100644
52484 index 0000000..4eda5c3
52485 --- /dev/null
52486 +++ b/grsecurity/gracl_fs.c
52487 @@ -0,0 +1,433 @@
52488 +#include <linux/kernel.h>
52489 +#include <linux/sched.h>
52490 +#include <linux/types.h>
52491 +#include <linux/fs.h>
52492 +#include <linux/file.h>
52493 +#include <linux/stat.h>
52494 +#include <linux/grsecurity.h>
52495 +#include <linux/grinternal.h>
52496 +#include <linux/gracl.h>
52497 +
52498 +__u32
52499 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52500 + const struct vfsmount * mnt)
52501 +{
52502 + __u32 mode;
52503 +
52504 + if (unlikely(!dentry->d_inode))
52505 + return GR_FIND;
52506 +
52507 + mode =
52508 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52509 +
52510 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52511 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52512 + return mode;
52513 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52514 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52515 + return 0;
52516 + } else if (unlikely(!(mode & GR_FIND)))
52517 + return 0;
52518 +
52519 + return GR_FIND;
52520 +}
52521 +
52522 +__u32
52523 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52524 + int acc_mode)
52525 +{
52526 + __u32 reqmode = GR_FIND;
52527 + __u32 mode;
52528 +
52529 + if (unlikely(!dentry->d_inode))
52530 + return reqmode;
52531 +
52532 + if (acc_mode & MAY_APPEND)
52533 + reqmode |= GR_APPEND;
52534 + else if (acc_mode & MAY_WRITE)
52535 + reqmode |= GR_WRITE;
52536 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52537 + reqmode |= GR_READ;
52538 +
52539 + mode =
52540 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52541 + mnt);
52542 +
52543 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52544 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52545 + reqmode & GR_READ ? " reading" : "",
52546 + reqmode & GR_WRITE ? " writing" : reqmode &
52547 + GR_APPEND ? " appending" : "");
52548 + return reqmode;
52549 + } else
52550 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52551 + {
52552 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52553 + reqmode & GR_READ ? " reading" : "",
52554 + reqmode & GR_WRITE ? " writing" : reqmode &
52555 + GR_APPEND ? " appending" : "");
52556 + return 0;
52557 + } else if (unlikely((mode & reqmode) != reqmode))
52558 + return 0;
52559 +
52560 + return reqmode;
52561 +}
52562 +
52563 +__u32
52564 +gr_acl_handle_creat(const struct dentry * dentry,
52565 + const struct dentry * p_dentry,
52566 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52567 + const int imode)
52568 +{
52569 + __u32 reqmode = GR_WRITE | GR_CREATE;
52570 + __u32 mode;
52571 +
52572 + if (acc_mode & MAY_APPEND)
52573 + reqmode |= GR_APPEND;
52574 + // if a directory was required or the directory already exists, then
52575 + // don't count this open as a read
52576 + if ((acc_mode & MAY_READ) &&
52577 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52578 + reqmode |= GR_READ;
52579 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52580 + reqmode |= GR_SETID;
52581 +
52582 + mode =
52583 + gr_check_create(dentry, p_dentry, p_mnt,
52584 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52585 +
52586 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52587 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52588 + reqmode & GR_READ ? " reading" : "",
52589 + reqmode & GR_WRITE ? " writing" : reqmode &
52590 + GR_APPEND ? " appending" : "");
52591 + return reqmode;
52592 + } else
52593 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52594 + {
52595 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52596 + reqmode & GR_READ ? " reading" : "",
52597 + reqmode & GR_WRITE ? " writing" : reqmode &
52598 + GR_APPEND ? " appending" : "");
52599 + return 0;
52600 + } else if (unlikely((mode & reqmode) != reqmode))
52601 + return 0;
52602 +
52603 + return reqmode;
52604 +}
52605 +
52606 +__u32
52607 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52608 + const int fmode)
52609 +{
52610 + __u32 mode, reqmode = GR_FIND;
52611 +
52612 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52613 + reqmode |= GR_EXEC;
52614 + if (fmode & S_IWOTH)
52615 + reqmode |= GR_WRITE;
52616 + if (fmode & S_IROTH)
52617 + reqmode |= GR_READ;
52618 +
52619 + mode =
52620 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52621 + mnt);
52622 +
52623 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52624 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52625 + reqmode & GR_READ ? " reading" : "",
52626 + reqmode & GR_WRITE ? " writing" : "",
52627 + reqmode & GR_EXEC ? " executing" : "");
52628 + return reqmode;
52629 + } else
52630 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52631 + {
52632 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52633 + reqmode & GR_READ ? " reading" : "",
52634 + reqmode & GR_WRITE ? " writing" : "",
52635 + reqmode & GR_EXEC ? " executing" : "");
52636 + return 0;
52637 + } else if (unlikely((mode & reqmode) != reqmode))
52638 + return 0;
52639 +
52640 + return reqmode;
52641 +}
52642 +
52643 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52644 +{
52645 + __u32 mode;
52646 +
52647 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52648 +
52649 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52650 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52651 + return mode;
52652 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52653 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52654 + return 0;
52655 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52656 + return 0;
52657 +
52658 + return (reqmode);
52659 +}
52660 +
52661 +__u32
52662 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52663 +{
52664 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52665 +}
52666 +
52667 +__u32
52668 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52669 +{
52670 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52671 +}
52672 +
52673 +__u32
52674 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52675 +{
52676 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52677 +}
52678 +
52679 +__u32
52680 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52681 +{
52682 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52683 +}
52684 +
52685 +__u32
52686 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52687 + mode_t mode)
52688 +{
52689 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52690 + return 1;
52691 +
52692 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52693 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52694 + GR_FCHMOD_ACL_MSG);
52695 + } else {
52696 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52697 + }
52698 +}
52699 +
52700 +__u32
52701 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52702 + mode_t mode)
52703 +{
52704 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52705 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52706 + GR_CHMOD_ACL_MSG);
52707 + } else {
52708 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52709 + }
52710 +}
52711 +
52712 +__u32
52713 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52714 +{
52715 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52716 +}
52717 +
52718 +__u32
52719 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52720 +{
52721 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52722 +}
52723 +
52724 +__u32
52725 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52726 +{
52727 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52728 +}
52729 +
52730 +__u32
52731 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52732 +{
52733 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52734 + GR_UNIXCONNECT_ACL_MSG);
52735 +}
52736 +
52737 +/* hardlinks require at minimum create and link permission,
52738 + any additional privilege required is based on the
52739 + privilege of the file being linked to
52740 +*/
52741 +__u32
52742 +gr_acl_handle_link(const struct dentry * new_dentry,
52743 + const struct dentry * parent_dentry,
52744 + const struct vfsmount * parent_mnt,
52745 + const struct dentry * old_dentry,
52746 + const struct vfsmount * old_mnt, const char *to)
52747 +{
52748 + __u32 mode;
52749 + __u32 needmode = GR_CREATE | GR_LINK;
52750 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52751 +
52752 + mode =
52753 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52754 + old_mnt);
52755 +
52756 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52757 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52758 + return mode;
52759 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52760 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52761 + return 0;
52762 + } else if (unlikely((mode & needmode) != needmode))
52763 + return 0;
52764 +
52765 + return 1;
52766 +}
52767 +
52768 +__u32
52769 +gr_acl_handle_symlink(const struct dentry * new_dentry,
52770 + const struct dentry * parent_dentry,
52771 + const struct vfsmount * parent_mnt, const char *from)
52772 +{
52773 + __u32 needmode = GR_WRITE | GR_CREATE;
52774 + __u32 mode;
52775 +
52776 + mode =
52777 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
52778 + GR_CREATE | GR_AUDIT_CREATE |
52779 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52780 +
52781 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52782 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52783 + return mode;
52784 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52785 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52786 + return 0;
52787 + } else if (unlikely((mode & needmode) != needmode))
52788 + return 0;
52789 +
52790 + return (GR_WRITE | GR_CREATE);
52791 +}
52792 +
52793 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52794 +{
52795 + __u32 mode;
52796 +
52797 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52798 +
52799 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52800 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52801 + return mode;
52802 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52803 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52804 + return 0;
52805 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52806 + return 0;
52807 +
52808 + return (reqmode);
52809 +}
52810 +
52811 +__u32
52812 +gr_acl_handle_mknod(const struct dentry * new_dentry,
52813 + const struct dentry * parent_dentry,
52814 + const struct vfsmount * parent_mnt,
52815 + const int mode)
52816 +{
52817 + __u32 reqmode = GR_WRITE | GR_CREATE;
52818 + if (unlikely(mode & (S_ISUID | S_ISGID)))
52819 + reqmode |= GR_SETID;
52820 +
52821 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52822 + reqmode, GR_MKNOD_ACL_MSG);
52823 +}
52824 +
52825 +__u32
52826 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
52827 + const struct dentry *parent_dentry,
52828 + const struct vfsmount *parent_mnt)
52829 +{
52830 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52831 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52832 +}
52833 +
52834 +#define RENAME_CHECK_SUCCESS(old, new) \
52835 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52836 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52837 +
52838 +int
52839 +gr_acl_handle_rename(struct dentry *new_dentry,
52840 + struct dentry *parent_dentry,
52841 + const struct vfsmount *parent_mnt,
52842 + struct dentry *old_dentry,
52843 + struct inode *old_parent_inode,
52844 + struct vfsmount *old_mnt, const char *newname)
52845 +{
52846 + __u32 comp1, comp2;
52847 + int error = 0;
52848 +
52849 + if (unlikely(!gr_acl_is_enabled()))
52850 + return 0;
52851 +
52852 + if (!new_dentry->d_inode) {
52853 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52854 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52855 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52856 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52857 + GR_DELETE | GR_AUDIT_DELETE |
52858 + GR_AUDIT_READ | GR_AUDIT_WRITE |
52859 + GR_SUPPRESS, old_mnt);
52860 + } else {
52861 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52862 + GR_CREATE | GR_DELETE |
52863 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52864 + GR_AUDIT_READ | GR_AUDIT_WRITE |
52865 + GR_SUPPRESS, parent_mnt);
52866 + comp2 =
52867 + gr_search_file(old_dentry,
52868 + GR_READ | GR_WRITE | GR_AUDIT_READ |
52869 + GR_DELETE | GR_AUDIT_DELETE |
52870 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52871 + }
52872 +
52873 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52874 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52875 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52876 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52877 + && !(comp2 & GR_SUPPRESS)) {
52878 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52879 + error = -EACCES;
52880 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52881 + error = -EACCES;
52882 +
52883 + return error;
52884 +}
52885 +
52886 +void
52887 +gr_acl_handle_exit(void)
52888 +{
52889 + u16 id;
52890 + char *rolename;
52891 + struct file *exec_file;
52892 +
52893 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52894 + !(current->role->roletype & GR_ROLE_PERSIST))) {
52895 + id = current->acl_role_id;
52896 + rolename = current->role->rolename;
52897 + gr_set_acls(1);
52898 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52899 + }
52900 +
52901 + write_lock(&grsec_exec_file_lock);
52902 + exec_file = current->exec_file;
52903 + current->exec_file = NULL;
52904 + write_unlock(&grsec_exec_file_lock);
52905 +
52906 + if (exec_file)
52907 + fput(exec_file);
52908 +}
52909 +
52910 +int
52911 +gr_acl_handle_procpidmem(const struct task_struct *task)
52912 +{
52913 + if (unlikely(!gr_acl_is_enabled()))
52914 + return 0;
52915 +
52916 + if (task != current && task->acl->mode & GR_PROTPROCFD)
52917 + return -EACCES;
52918 +
52919 + return 0;
52920 +}
52921 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
52922 new file mode 100644
52923 index 0000000..17050ca
52924 --- /dev/null
52925 +++ b/grsecurity/gracl_ip.c
52926 @@ -0,0 +1,381 @@
52927 +#include <linux/kernel.h>
52928 +#include <asm/uaccess.h>
52929 +#include <asm/errno.h>
52930 +#include <net/sock.h>
52931 +#include <linux/file.h>
52932 +#include <linux/fs.h>
52933 +#include <linux/net.h>
52934 +#include <linux/in.h>
52935 +#include <linux/skbuff.h>
52936 +#include <linux/ip.h>
52937 +#include <linux/udp.h>
52938 +#include <linux/types.h>
52939 +#include <linux/sched.h>
52940 +#include <linux/netdevice.h>
52941 +#include <linux/inetdevice.h>
52942 +#include <linux/gracl.h>
52943 +#include <linux/grsecurity.h>
52944 +#include <linux/grinternal.h>
52945 +
52946 +#define GR_BIND 0x01
52947 +#define GR_CONNECT 0x02
52948 +#define GR_INVERT 0x04
52949 +#define GR_BINDOVERRIDE 0x08
52950 +#define GR_CONNECTOVERRIDE 0x10
52951 +#define GR_SOCK_FAMILY 0x20
52952 +
52953 +static const char * gr_protocols[IPPROTO_MAX] = {
52954 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52955 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52956 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52957 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52958 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52959 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52960 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52961 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52962 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52963 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52964 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52965 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52966 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52967 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52968 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52969 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52970 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52971 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52972 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52973 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52974 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52975 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52976 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52977 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52978 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52979 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52980 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52981 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52982 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52983 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52984 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52985 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52986 + };
52987 +
52988 +static const char * gr_socktypes[SOCK_MAX] = {
52989 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52990 + "unknown:7", "unknown:8", "unknown:9", "packet"
52991 + };
52992 +
52993 +static const char * gr_sockfamilies[AF_MAX+1] = {
52994 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52995 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52996 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52997 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52998 + };
52999 +
53000 +const char *
53001 +gr_proto_to_name(unsigned char proto)
53002 +{
53003 + return gr_protocols[proto];
53004 +}
53005 +
53006 +const char *
53007 +gr_socktype_to_name(unsigned char type)
53008 +{
53009 + return gr_socktypes[type];
53010 +}
53011 +
53012 +const char *
53013 +gr_sockfamily_to_name(unsigned char family)
53014 +{
53015 + return gr_sockfamilies[family];
53016 +}
53017 +
53018 +int
53019 +gr_search_socket(const int domain, const int type, const int protocol)
53020 +{
53021 + struct acl_subject_label *curr;
53022 + const struct cred *cred = current_cred();
53023 +
53024 + if (unlikely(!gr_acl_is_enabled()))
53025 + goto exit;
53026 +
53027 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53028 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53029 + goto exit; // let the kernel handle it
53030 +
53031 + curr = current->acl;
53032 +
53033 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53034 + /* the family is allowed, if this is PF_INET allow it only if
53035 + the extra sock type/protocol checks pass */
53036 + if (domain == PF_INET)
53037 + goto inet_check;
53038 + goto exit;
53039 + } else {
53040 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53041 + __u32 fakeip = 0;
53042 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53043 + current->role->roletype, cred->uid,
53044 + cred->gid, current->exec_file ?
53045 + gr_to_filename(current->exec_file->f_path.dentry,
53046 + current->exec_file->f_path.mnt) :
53047 + curr->filename, curr->filename,
53048 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53049 + &current->signal->saved_ip);
53050 + goto exit;
53051 + }
53052 + goto exit_fail;
53053 + }
53054 +
53055 +inet_check:
53056 + /* the rest of this checking is for IPv4 only */
53057 + if (!curr->ips)
53058 + goto exit;
53059 +
53060 + if ((curr->ip_type & (1 << type)) &&
53061 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53062 + goto exit;
53063 +
53064 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53065 + /* we don't place acls on raw sockets , and sometimes
53066 + dgram/ip sockets are opened for ioctl and not
53067 + bind/connect, so we'll fake a bind learn log */
53068 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53069 + __u32 fakeip = 0;
53070 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53071 + current->role->roletype, cred->uid,
53072 + cred->gid, current->exec_file ?
53073 + gr_to_filename(current->exec_file->f_path.dentry,
53074 + current->exec_file->f_path.mnt) :
53075 + curr->filename, curr->filename,
53076 + &fakeip, 0, type,
53077 + protocol, GR_CONNECT, &current->signal->saved_ip);
53078 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53079 + __u32 fakeip = 0;
53080 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53081 + current->role->roletype, cred->uid,
53082 + cred->gid, current->exec_file ?
53083 + gr_to_filename(current->exec_file->f_path.dentry,
53084 + current->exec_file->f_path.mnt) :
53085 + curr->filename, curr->filename,
53086 + &fakeip, 0, type,
53087 + protocol, GR_BIND, &current->signal->saved_ip);
53088 + }
53089 + /* we'll log when they use connect or bind */
53090 + goto exit;
53091 + }
53092 +
53093 +exit_fail:
53094 + if (domain == PF_INET)
53095 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53096 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53097 + else
53098 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53099 + gr_socktype_to_name(type), protocol);
53100 +
53101 + return 0;
53102 +exit:
53103 + return 1;
53104 +}
53105 +
53106 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53107 +{
53108 + if ((ip->mode & mode) &&
53109 + (ip_port >= ip->low) &&
53110 + (ip_port <= ip->high) &&
53111 + ((ntohl(ip_addr) & our_netmask) ==
53112 + (ntohl(our_addr) & our_netmask))
53113 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53114 + && (ip->type & (1 << type))) {
53115 + if (ip->mode & GR_INVERT)
53116 + return 2; // specifically denied
53117 + else
53118 + return 1; // allowed
53119 + }
53120 +
53121 + return 0; // not specifically allowed, may continue parsing
53122 +}
53123 +
53124 +static int
53125 +gr_search_connectbind(const int full_mode, struct sock *sk,
53126 + struct sockaddr_in *addr, const int type)
53127 +{
53128 + char iface[IFNAMSIZ] = {0};
53129 + struct acl_subject_label *curr;
53130 + struct acl_ip_label *ip;
53131 + struct inet_sock *isk;
53132 + struct net_device *dev;
53133 + struct in_device *idev;
53134 + unsigned long i;
53135 + int ret;
53136 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53137 + __u32 ip_addr = 0;
53138 + __u32 our_addr;
53139 + __u32 our_netmask;
53140 + char *p;
53141 + __u16 ip_port = 0;
53142 + const struct cred *cred = current_cred();
53143 +
53144 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53145 + return 0;
53146 +
53147 + curr = current->acl;
53148 + isk = inet_sk(sk);
53149 +
53150 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53151 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53152 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53153 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53154 + struct sockaddr_in saddr;
53155 + int err;
53156 +
53157 + saddr.sin_family = AF_INET;
53158 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53159 + saddr.sin_port = isk->inet_sport;
53160 +
53161 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53162 + if (err)
53163 + return err;
53164 +
53165 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53166 + if (err)
53167 + return err;
53168 + }
53169 +
53170 + if (!curr->ips)
53171 + return 0;
53172 +
53173 + ip_addr = addr->sin_addr.s_addr;
53174 + ip_port = ntohs(addr->sin_port);
53175 +
53176 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53177 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53178 + current->role->roletype, cred->uid,
53179 + cred->gid, current->exec_file ?
53180 + gr_to_filename(current->exec_file->f_path.dentry,
53181 + current->exec_file->f_path.mnt) :
53182 + curr->filename, curr->filename,
53183 + &ip_addr, ip_port, type,
53184 + sk->sk_protocol, mode, &current->signal->saved_ip);
53185 + return 0;
53186 + }
53187 +
53188 + for (i = 0; i < curr->ip_num; i++) {
53189 + ip = *(curr->ips + i);
53190 + if (ip->iface != NULL) {
53191 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53192 + p = strchr(iface, ':');
53193 + if (p != NULL)
53194 + *p = '\0';
53195 + dev = dev_get_by_name(sock_net(sk), iface);
53196 + if (dev == NULL)
53197 + continue;
53198 + idev = in_dev_get(dev);
53199 + if (idev == NULL) {
53200 + dev_put(dev);
53201 + continue;
53202 + }
53203 + rcu_read_lock();
53204 + for_ifa(idev) {
53205 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53206 + our_addr = ifa->ifa_address;
53207 + our_netmask = 0xffffffff;
53208 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53209 + if (ret == 1) {
53210 + rcu_read_unlock();
53211 + in_dev_put(idev);
53212 + dev_put(dev);
53213 + return 0;
53214 + } else if (ret == 2) {
53215 + rcu_read_unlock();
53216 + in_dev_put(idev);
53217 + dev_put(dev);
53218 + goto denied;
53219 + }
53220 + }
53221 + } endfor_ifa(idev);
53222 + rcu_read_unlock();
53223 + in_dev_put(idev);
53224 + dev_put(dev);
53225 + } else {
53226 + our_addr = ip->addr;
53227 + our_netmask = ip->netmask;
53228 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53229 + if (ret == 1)
53230 + return 0;
53231 + else if (ret == 2)
53232 + goto denied;
53233 + }
53234 + }
53235 +
53236 +denied:
53237 + if (mode == GR_BIND)
53238 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53239 + else if (mode == GR_CONNECT)
53240 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53241 +
53242 + return -EACCES;
53243 +}
53244 +
53245 +int
53246 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53247 +{
53248 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53249 +}
53250 +
53251 +int
53252 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53253 +{
53254 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53255 +}
53256 +
53257 +int gr_search_listen(struct socket *sock)
53258 +{
53259 + struct sock *sk = sock->sk;
53260 + struct sockaddr_in addr;
53261 +
53262 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53263 + addr.sin_port = inet_sk(sk)->inet_sport;
53264 +
53265 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53266 +}
53267 +
53268 +int gr_search_accept(struct socket *sock)
53269 +{
53270 + struct sock *sk = sock->sk;
53271 + struct sockaddr_in addr;
53272 +
53273 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53274 + addr.sin_port = inet_sk(sk)->inet_sport;
53275 +
53276 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53277 +}
53278 +
53279 +int
53280 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53281 +{
53282 + if (addr)
53283 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53284 + else {
53285 + struct sockaddr_in sin;
53286 + const struct inet_sock *inet = inet_sk(sk);
53287 +
53288 + sin.sin_addr.s_addr = inet->inet_daddr;
53289 + sin.sin_port = inet->inet_dport;
53290 +
53291 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53292 + }
53293 +}
53294 +
53295 +int
53296 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53297 +{
53298 + struct sockaddr_in sin;
53299 +
53300 + if (unlikely(skb->len < sizeof (struct udphdr)))
53301 + return 0; // skip this packet
53302 +
53303 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53304 + sin.sin_port = udp_hdr(skb)->source;
53305 +
53306 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53307 +}
53308 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53309 new file mode 100644
53310 index 0000000..25f54ef
53311 --- /dev/null
53312 +++ b/grsecurity/gracl_learn.c
53313 @@ -0,0 +1,207 @@
53314 +#include <linux/kernel.h>
53315 +#include <linux/mm.h>
53316 +#include <linux/sched.h>
53317 +#include <linux/poll.h>
53318 +#include <linux/string.h>
53319 +#include <linux/file.h>
53320 +#include <linux/types.h>
53321 +#include <linux/vmalloc.h>
53322 +#include <linux/grinternal.h>
53323 +
53324 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53325 + size_t count, loff_t *ppos);
53326 +extern int gr_acl_is_enabled(void);
53327 +
53328 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53329 +static int gr_learn_attached;
53330 +
53331 +/* use a 512k buffer */
53332 +#define LEARN_BUFFER_SIZE (512 * 1024)
53333 +
53334 +static DEFINE_SPINLOCK(gr_learn_lock);
53335 +static DEFINE_MUTEX(gr_learn_user_mutex);
53336 +
53337 +/* we need to maintain two buffers, so that the kernel context of grlearn
53338 + uses a semaphore around the userspace copying, and the other kernel contexts
53339 + use a spinlock when copying into the buffer, since they cannot sleep
53340 +*/
53341 +static char *learn_buffer;
53342 +static char *learn_buffer_user;
53343 +static int learn_buffer_len;
53344 +static int learn_buffer_user_len;
53345 +
53346 +static ssize_t
53347 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53348 +{
53349 + DECLARE_WAITQUEUE(wait, current);
53350 + ssize_t retval = 0;
53351 +
53352 + add_wait_queue(&learn_wait, &wait);
53353 + set_current_state(TASK_INTERRUPTIBLE);
53354 + do {
53355 + mutex_lock(&gr_learn_user_mutex);
53356 + spin_lock(&gr_learn_lock);
53357 + if (learn_buffer_len)
53358 + break;
53359 + spin_unlock(&gr_learn_lock);
53360 + mutex_unlock(&gr_learn_user_mutex);
53361 + if (file->f_flags & O_NONBLOCK) {
53362 + retval = -EAGAIN;
53363 + goto out;
53364 + }
53365 + if (signal_pending(current)) {
53366 + retval = -ERESTARTSYS;
53367 + goto out;
53368 + }
53369 +
53370 + schedule();
53371 + } while (1);
53372 +
53373 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53374 + learn_buffer_user_len = learn_buffer_len;
53375 + retval = learn_buffer_len;
53376 + learn_buffer_len = 0;
53377 +
53378 + spin_unlock(&gr_learn_lock);
53379 +
53380 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53381 + retval = -EFAULT;
53382 +
53383 + mutex_unlock(&gr_learn_user_mutex);
53384 +out:
53385 + set_current_state(TASK_RUNNING);
53386 + remove_wait_queue(&learn_wait, &wait);
53387 + return retval;
53388 +}
53389 +
53390 +static unsigned int
53391 +poll_learn(struct file * file, poll_table * wait)
53392 +{
53393 + poll_wait(file, &learn_wait, wait);
53394 +
53395 + if (learn_buffer_len)
53396 + return (POLLIN | POLLRDNORM);
53397 +
53398 + return 0;
53399 +}
53400 +
53401 +void
53402 +gr_clear_learn_entries(void)
53403 +{
53404 + char *tmp;
53405 +
53406 + mutex_lock(&gr_learn_user_mutex);
53407 + spin_lock(&gr_learn_lock);
53408 + tmp = learn_buffer;
53409 + learn_buffer = NULL;
53410 + spin_unlock(&gr_learn_lock);
53411 + if (tmp)
53412 + vfree(tmp);
53413 + if (learn_buffer_user != NULL) {
53414 + vfree(learn_buffer_user);
53415 + learn_buffer_user = NULL;
53416 + }
53417 + learn_buffer_len = 0;
53418 + mutex_unlock(&gr_learn_user_mutex);
53419 +
53420 + return;
53421 +}
53422 +
53423 +void
53424 +gr_add_learn_entry(const char *fmt, ...)
53425 +{
53426 + va_list args;
53427 + unsigned int len;
53428 +
53429 + if (!gr_learn_attached)
53430 + return;
53431 +
53432 + spin_lock(&gr_learn_lock);
53433 +
53434 + /* leave a gap at the end so we know when it's "full" but don't have to
53435 + compute the exact length of the string we're trying to append
53436 + */
53437 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53438 + spin_unlock(&gr_learn_lock);
53439 + wake_up_interruptible(&learn_wait);
53440 + return;
53441 + }
53442 + if (learn_buffer == NULL) {
53443 + spin_unlock(&gr_learn_lock);
53444 + return;
53445 + }
53446 +
53447 + va_start(args, fmt);
53448 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53449 + va_end(args);
53450 +
53451 + learn_buffer_len += len + 1;
53452 +
53453 + spin_unlock(&gr_learn_lock);
53454 + wake_up_interruptible(&learn_wait);
53455 +
53456 + return;
53457 +}
53458 +
53459 +static int
53460 +open_learn(struct inode *inode, struct file *file)
53461 +{
53462 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53463 + return -EBUSY;
53464 + if (file->f_mode & FMODE_READ) {
53465 + int retval = 0;
53466 + mutex_lock(&gr_learn_user_mutex);
53467 + if (learn_buffer == NULL)
53468 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53469 + if (learn_buffer_user == NULL)
53470 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53471 + if (learn_buffer == NULL) {
53472 + retval = -ENOMEM;
53473 + goto out_error;
53474 + }
53475 + if (learn_buffer_user == NULL) {
53476 + retval = -ENOMEM;
53477 + goto out_error;
53478 + }
53479 + learn_buffer_len = 0;
53480 + learn_buffer_user_len = 0;
53481 + gr_learn_attached = 1;
53482 +out_error:
53483 + mutex_unlock(&gr_learn_user_mutex);
53484 + return retval;
53485 + }
53486 + return 0;
53487 +}
53488 +
53489 +static int
53490 +close_learn(struct inode *inode, struct file *file)
53491 +{
53492 + if (file->f_mode & FMODE_READ) {
53493 + char *tmp = NULL;
53494 + mutex_lock(&gr_learn_user_mutex);
53495 + spin_lock(&gr_learn_lock);
53496 + tmp = learn_buffer;
53497 + learn_buffer = NULL;
53498 + spin_unlock(&gr_learn_lock);
53499 + if (tmp)
53500 + vfree(tmp);
53501 + if (learn_buffer_user != NULL) {
53502 + vfree(learn_buffer_user);
53503 + learn_buffer_user = NULL;
53504 + }
53505 + learn_buffer_len = 0;
53506 + learn_buffer_user_len = 0;
53507 + gr_learn_attached = 0;
53508 + mutex_unlock(&gr_learn_user_mutex);
53509 + }
53510 +
53511 + return 0;
53512 +}
53513 +
53514 +const struct file_operations grsec_fops = {
53515 + .read = read_learn,
53516 + .write = write_grsec_handler,
53517 + .open = open_learn,
53518 + .release = close_learn,
53519 + .poll = poll_learn,
53520 +};
53521 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53522 new file mode 100644
53523 index 0000000..39645c9
53524 --- /dev/null
53525 +++ b/grsecurity/gracl_res.c
53526 @@ -0,0 +1,68 @@
53527 +#include <linux/kernel.h>
53528 +#include <linux/sched.h>
53529 +#include <linux/gracl.h>
53530 +#include <linux/grinternal.h>
53531 +
53532 +static const char *restab_log[] = {
53533 + [RLIMIT_CPU] = "RLIMIT_CPU",
53534 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53535 + [RLIMIT_DATA] = "RLIMIT_DATA",
53536 + [RLIMIT_STACK] = "RLIMIT_STACK",
53537 + [RLIMIT_CORE] = "RLIMIT_CORE",
53538 + [RLIMIT_RSS] = "RLIMIT_RSS",
53539 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53540 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53541 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53542 + [RLIMIT_AS] = "RLIMIT_AS",
53543 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53544 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53545 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53546 + [RLIMIT_NICE] = "RLIMIT_NICE",
53547 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53548 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53549 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53550 +};
53551 +
53552 +void
53553 +gr_log_resource(const struct task_struct *task,
53554 + const int res, const unsigned long wanted, const int gt)
53555 +{
53556 + const struct cred *cred;
53557 + unsigned long rlim;
53558 +
53559 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53560 + return;
53561 +
53562 + // not yet supported resource
53563 + if (unlikely(!restab_log[res]))
53564 + return;
53565 +
53566 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53567 + rlim = task_rlimit_max(task, res);
53568 + else
53569 + rlim = task_rlimit(task, res);
53570 +
53571 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53572 + return;
53573 +
53574 + rcu_read_lock();
53575 + cred = __task_cred(task);
53576 +
53577 + if (res == RLIMIT_NPROC &&
53578 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53579 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53580 + goto out_rcu_unlock;
53581 + else if (res == RLIMIT_MEMLOCK &&
53582 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53583 + goto out_rcu_unlock;
53584 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53585 + goto out_rcu_unlock;
53586 + rcu_read_unlock();
53587 +
53588 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53589 +
53590 + return;
53591 +out_rcu_unlock:
53592 + rcu_read_unlock();
53593 + return;
53594 +}
53595 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53596 new file mode 100644
53597 index 0000000..5556be3
53598 --- /dev/null
53599 +++ b/grsecurity/gracl_segv.c
53600 @@ -0,0 +1,299 @@
53601 +#include <linux/kernel.h>
53602 +#include <linux/mm.h>
53603 +#include <asm/uaccess.h>
53604 +#include <asm/errno.h>
53605 +#include <asm/mman.h>
53606 +#include <net/sock.h>
53607 +#include <linux/file.h>
53608 +#include <linux/fs.h>
53609 +#include <linux/net.h>
53610 +#include <linux/in.h>
53611 +#include <linux/slab.h>
53612 +#include <linux/types.h>
53613 +#include <linux/sched.h>
53614 +#include <linux/timer.h>
53615 +#include <linux/gracl.h>
53616 +#include <linux/grsecurity.h>
53617 +#include <linux/grinternal.h>
53618 +
53619 +static struct crash_uid *uid_set;
53620 +static unsigned short uid_used;
53621 +static DEFINE_SPINLOCK(gr_uid_lock);
53622 +extern rwlock_t gr_inode_lock;
53623 +extern struct acl_subject_label *
53624 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53625 + struct acl_role_label *role);
53626 +
53627 +#ifdef CONFIG_BTRFS_FS
53628 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53629 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53630 +#endif
53631 +
53632 +static inline dev_t __get_dev(const struct dentry *dentry)
53633 +{
53634 +#ifdef CONFIG_BTRFS_FS
53635 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53636 + return get_btrfs_dev_from_inode(dentry->d_inode);
53637 + else
53638 +#endif
53639 + return dentry->d_inode->i_sb->s_dev;
53640 +}
53641 +
53642 +int
53643 +gr_init_uidset(void)
53644 +{
53645 + uid_set =
53646 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53647 + uid_used = 0;
53648 +
53649 + return uid_set ? 1 : 0;
53650 +}
53651 +
53652 +void
53653 +gr_free_uidset(void)
53654 +{
53655 + if (uid_set)
53656 + kfree(uid_set);
53657 +
53658 + return;
53659 +}
53660 +
53661 +int
53662 +gr_find_uid(const uid_t uid)
53663 +{
53664 + struct crash_uid *tmp = uid_set;
53665 + uid_t buid;
53666 + int low = 0, high = uid_used - 1, mid;
53667 +
53668 + while (high >= low) {
53669 + mid = (low + high) >> 1;
53670 + buid = tmp[mid].uid;
53671 + if (buid == uid)
53672 + return mid;
53673 + if (buid > uid)
53674 + high = mid - 1;
53675 + if (buid < uid)
53676 + low = mid + 1;
53677 + }
53678 +
53679 + return -1;
53680 +}
53681 +
53682 +static __inline__ void
53683 +gr_insertsort(void)
53684 +{
53685 + unsigned short i, j;
53686 + struct crash_uid index;
53687 +
53688 + for (i = 1; i < uid_used; i++) {
53689 + index = uid_set[i];
53690 + j = i;
53691 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53692 + uid_set[j] = uid_set[j - 1];
53693 + j--;
53694 + }
53695 + uid_set[j] = index;
53696 + }
53697 +
53698 + return;
53699 +}
53700 +
53701 +static __inline__ void
53702 +gr_insert_uid(const uid_t uid, const unsigned long expires)
53703 +{
53704 + int loc;
53705 +
53706 + if (uid_used == GR_UIDTABLE_MAX)
53707 + return;
53708 +
53709 + loc = gr_find_uid(uid);
53710 +
53711 + if (loc >= 0) {
53712 + uid_set[loc].expires = expires;
53713 + return;
53714 + }
53715 +
53716 + uid_set[uid_used].uid = uid;
53717 + uid_set[uid_used].expires = expires;
53718 + uid_used++;
53719 +
53720 + gr_insertsort();
53721 +
53722 + return;
53723 +}
53724 +
53725 +void
53726 +gr_remove_uid(const unsigned short loc)
53727 +{
53728 + unsigned short i;
53729 +
53730 + for (i = loc + 1; i < uid_used; i++)
53731 + uid_set[i - 1] = uid_set[i];
53732 +
53733 + uid_used--;
53734 +
53735 + return;
53736 +}
53737 +
53738 +int
53739 +gr_check_crash_uid(const uid_t uid)
53740 +{
53741 + int loc;
53742 + int ret = 0;
53743 +
53744 + if (unlikely(!gr_acl_is_enabled()))
53745 + return 0;
53746 +
53747 + spin_lock(&gr_uid_lock);
53748 + loc = gr_find_uid(uid);
53749 +
53750 + if (loc < 0)
53751 + goto out_unlock;
53752 +
53753 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
53754 + gr_remove_uid(loc);
53755 + else
53756 + ret = 1;
53757 +
53758 +out_unlock:
53759 + spin_unlock(&gr_uid_lock);
53760 + return ret;
53761 +}
53762 +
53763 +static __inline__ int
53764 +proc_is_setxid(const struct cred *cred)
53765 +{
53766 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
53767 + cred->uid != cred->fsuid)
53768 + return 1;
53769 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53770 + cred->gid != cred->fsgid)
53771 + return 1;
53772 +
53773 + return 0;
53774 +}
53775 +
53776 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
53777 +
53778 +void
53779 +gr_handle_crash(struct task_struct *task, const int sig)
53780 +{
53781 + struct acl_subject_label *curr;
53782 + struct task_struct *tsk, *tsk2;
53783 + const struct cred *cred;
53784 + const struct cred *cred2;
53785 +
53786 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53787 + return;
53788 +
53789 + if (unlikely(!gr_acl_is_enabled()))
53790 + return;
53791 +
53792 + curr = task->acl;
53793 +
53794 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
53795 + return;
53796 +
53797 + if (time_before_eq(curr->expires, get_seconds())) {
53798 + curr->expires = 0;
53799 + curr->crashes = 0;
53800 + }
53801 +
53802 + curr->crashes++;
53803 +
53804 + if (!curr->expires)
53805 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53806 +
53807 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53808 + time_after(curr->expires, get_seconds())) {
53809 + rcu_read_lock();
53810 + cred = __task_cred(task);
53811 + if (cred->uid && proc_is_setxid(cred)) {
53812 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53813 + spin_lock(&gr_uid_lock);
53814 + gr_insert_uid(cred->uid, curr->expires);
53815 + spin_unlock(&gr_uid_lock);
53816 + curr->expires = 0;
53817 + curr->crashes = 0;
53818 + read_lock(&tasklist_lock);
53819 + do_each_thread(tsk2, tsk) {
53820 + cred2 = __task_cred(tsk);
53821 + if (tsk != task && cred2->uid == cred->uid)
53822 + gr_fake_force_sig(SIGKILL, tsk);
53823 + } while_each_thread(tsk2, tsk);
53824 + read_unlock(&tasklist_lock);
53825 + } else {
53826 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53827 + read_lock(&tasklist_lock);
53828 + read_lock(&grsec_exec_file_lock);
53829 + do_each_thread(tsk2, tsk) {
53830 + if (likely(tsk != task)) {
53831 + // if this thread has the same subject as the one that triggered
53832 + // RES_CRASH and it's the same binary, kill it
53833 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
53834 + gr_fake_force_sig(SIGKILL, tsk);
53835 + }
53836 + } while_each_thread(tsk2, tsk);
53837 + read_unlock(&grsec_exec_file_lock);
53838 + read_unlock(&tasklist_lock);
53839 + }
53840 + rcu_read_unlock();
53841 + }
53842 +
53843 + return;
53844 +}
53845 +
53846 +int
53847 +gr_check_crash_exec(const struct file *filp)
53848 +{
53849 + struct acl_subject_label *curr;
53850 +
53851 + if (unlikely(!gr_acl_is_enabled()))
53852 + return 0;
53853 +
53854 + read_lock(&gr_inode_lock);
53855 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53856 + __get_dev(filp->f_path.dentry),
53857 + current->role);
53858 + read_unlock(&gr_inode_lock);
53859 +
53860 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53861 + (!curr->crashes && !curr->expires))
53862 + return 0;
53863 +
53864 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53865 + time_after(curr->expires, get_seconds()))
53866 + return 1;
53867 + else if (time_before_eq(curr->expires, get_seconds())) {
53868 + curr->crashes = 0;
53869 + curr->expires = 0;
53870 + }
53871 +
53872 + return 0;
53873 +}
53874 +
53875 +void
53876 +gr_handle_alertkill(struct task_struct *task)
53877 +{
53878 + struct acl_subject_label *curracl;
53879 + __u32 curr_ip;
53880 + struct task_struct *p, *p2;
53881 +
53882 + if (unlikely(!gr_acl_is_enabled()))
53883 + return;
53884 +
53885 + curracl = task->acl;
53886 + curr_ip = task->signal->curr_ip;
53887 +
53888 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53889 + read_lock(&tasklist_lock);
53890 + do_each_thread(p2, p) {
53891 + if (p->signal->curr_ip == curr_ip)
53892 + gr_fake_force_sig(SIGKILL, p);
53893 + } while_each_thread(p2, p);
53894 + read_unlock(&tasklist_lock);
53895 + } else if (curracl->mode & GR_KILLPROC)
53896 + gr_fake_force_sig(SIGKILL, task);
53897 +
53898 + return;
53899 +}
53900 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
53901 new file mode 100644
53902 index 0000000..9d83a69
53903 --- /dev/null
53904 +++ b/grsecurity/gracl_shm.c
53905 @@ -0,0 +1,40 @@
53906 +#include <linux/kernel.h>
53907 +#include <linux/mm.h>
53908 +#include <linux/sched.h>
53909 +#include <linux/file.h>
53910 +#include <linux/ipc.h>
53911 +#include <linux/gracl.h>
53912 +#include <linux/grsecurity.h>
53913 +#include <linux/grinternal.h>
53914 +
53915 +int
53916 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53917 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53918 +{
53919 + struct task_struct *task;
53920 +
53921 + if (!gr_acl_is_enabled())
53922 + return 1;
53923 +
53924 + rcu_read_lock();
53925 + read_lock(&tasklist_lock);
53926 +
53927 + task = find_task_by_vpid(shm_cprid);
53928 +
53929 + if (unlikely(!task))
53930 + task = find_task_by_vpid(shm_lapid);
53931 +
53932 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53933 + (task->pid == shm_lapid)) &&
53934 + (task->acl->mode & GR_PROTSHM) &&
53935 + (task->acl != current->acl))) {
53936 + read_unlock(&tasklist_lock);
53937 + rcu_read_unlock();
53938 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53939 + return 0;
53940 + }
53941 + read_unlock(&tasklist_lock);
53942 + rcu_read_unlock();
53943 +
53944 + return 1;
53945 +}
53946 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
53947 new file mode 100644
53948 index 0000000..bc0be01
53949 --- /dev/null
53950 +++ b/grsecurity/grsec_chdir.c
53951 @@ -0,0 +1,19 @@
53952 +#include <linux/kernel.h>
53953 +#include <linux/sched.h>
53954 +#include <linux/fs.h>
53955 +#include <linux/file.h>
53956 +#include <linux/grsecurity.h>
53957 +#include <linux/grinternal.h>
53958 +
53959 +void
53960 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53961 +{
53962 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53963 + if ((grsec_enable_chdir && grsec_enable_group &&
53964 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53965 + !grsec_enable_group)) {
53966 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53967 + }
53968 +#endif
53969 + return;
53970 +}
53971 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
53972 new file mode 100644
53973 index 0000000..a2dc675
53974 --- /dev/null
53975 +++ b/grsecurity/grsec_chroot.c
53976 @@ -0,0 +1,351 @@
53977 +#include <linux/kernel.h>
53978 +#include <linux/module.h>
53979 +#include <linux/sched.h>
53980 +#include <linux/file.h>
53981 +#include <linux/fs.h>
53982 +#include <linux/mount.h>
53983 +#include <linux/types.h>
53984 +#include <linux/pid_namespace.h>
53985 +#include <linux/grsecurity.h>
53986 +#include <linux/grinternal.h>
53987 +
53988 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53989 +{
53990 +#ifdef CONFIG_GRKERNSEC
53991 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53992 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53993 + task->gr_is_chrooted = 1;
53994 + else
53995 + task->gr_is_chrooted = 0;
53996 +
53997 + task->gr_chroot_dentry = path->dentry;
53998 +#endif
53999 + return;
54000 +}
54001 +
54002 +void gr_clear_chroot_entries(struct task_struct *task)
54003 +{
54004 +#ifdef CONFIG_GRKERNSEC
54005 + task->gr_is_chrooted = 0;
54006 + task->gr_chroot_dentry = NULL;
54007 +#endif
54008 + return;
54009 +}
54010 +
54011 +int
54012 +gr_handle_chroot_unix(const pid_t pid)
54013 +{
54014 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54015 + struct task_struct *p;
54016 +
54017 + if (unlikely(!grsec_enable_chroot_unix))
54018 + return 1;
54019 +
54020 + if (likely(!proc_is_chrooted(current)))
54021 + return 1;
54022 +
54023 + rcu_read_lock();
54024 + read_lock(&tasklist_lock);
54025 + p = find_task_by_vpid_unrestricted(pid);
54026 + if (unlikely(p && !have_same_root(current, p))) {
54027 + read_unlock(&tasklist_lock);
54028 + rcu_read_unlock();
54029 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54030 + return 0;
54031 + }
54032 + read_unlock(&tasklist_lock);
54033 + rcu_read_unlock();
54034 +#endif
54035 + return 1;
54036 +}
54037 +
54038 +int
54039 +gr_handle_chroot_nice(void)
54040 +{
54041 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54042 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54043 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54044 + return -EPERM;
54045 + }
54046 +#endif
54047 + return 0;
54048 +}
54049 +
54050 +int
54051 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54052 +{
54053 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54054 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54055 + && proc_is_chrooted(current)) {
54056 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54057 + return -EACCES;
54058 + }
54059 +#endif
54060 + return 0;
54061 +}
54062 +
54063 +int
54064 +gr_handle_chroot_rawio(const struct inode *inode)
54065 +{
54066 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54067 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54068 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54069 + return 1;
54070 +#endif
54071 + return 0;
54072 +}
54073 +
54074 +int
54075 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54076 +{
54077 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54078 + struct task_struct *p;
54079 + int ret = 0;
54080 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54081 + return ret;
54082 +
54083 + read_lock(&tasklist_lock);
54084 + do_each_pid_task(pid, type, p) {
54085 + if (!have_same_root(current, p)) {
54086 + ret = 1;
54087 + goto out;
54088 + }
54089 + } while_each_pid_task(pid, type, p);
54090 +out:
54091 + read_unlock(&tasklist_lock);
54092 + return ret;
54093 +#endif
54094 + return 0;
54095 +}
54096 +
54097 +int
54098 +gr_pid_is_chrooted(struct task_struct *p)
54099 +{
54100 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54101 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54102 + return 0;
54103 +
54104 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54105 + !have_same_root(current, p)) {
54106 + return 1;
54107 + }
54108 +#endif
54109 + return 0;
54110 +}
54111 +
54112 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54113 +
54114 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54115 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54116 +{
54117 + struct path path, currentroot;
54118 + int ret = 0;
54119 +
54120 + path.dentry = (struct dentry *)u_dentry;
54121 + path.mnt = (struct vfsmount *)u_mnt;
54122 + get_fs_root(current->fs, &currentroot);
54123 + if (path_is_under(&path, &currentroot))
54124 + ret = 1;
54125 + path_put(&currentroot);
54126 +
54127 + return ret;
54128 +}
54129 +#endif
54130 +
54131 +int
54132 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54133 +{
54134 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54135 + if (!grsec_enable_chroot_fchdir)
54136 + return 1;
54137 +
54138 + if (!proc_is_chrooted(current))
54139 + return 1;
54140 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54141 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54142 + return 0;
54143 + }
54144 +#endif
54145 + return 1;
54146 +}
54147 +
54148 +int
54149 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54150 + const time_t shm_createtime)
54151 +{
54152 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54153 + struct task_struct *p;
54154 + time_t starttime;
54155 +
54156 + if (unlikely(!grsec_enable_chroot_shmat))
54157 + return 1;
54158 +
54159 + if (likely(!proc_is_chrooted(current)))
54160 + return 1;
54161 +
54162 + rcu_read_lock();
54163 + read_lock(&tasklist_lock);
54164 +
54165 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54166 + starttime = p->start_time.tv_sec;
54167 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54168 + if (have_same_root(current, p)) {
54169 + goto allow;
54170 + } else {
54171 + read_unlock(&tasklist_lock);
54172 + rcu_read_unlock();
54173 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54174 + return 0;
54175 + }
54176 + }
54177 + /* creator exited, pid reuse, fall through to next check */
54178 + }
54179 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54180 + if (unlikely(!have_same_root(current, p))) {
54181 + read_unlock(&tasklist_lock);
54182 + rcu_read_unlock();
54183 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54184 + return 0;
54185 + }
54186 + }
54187 +
54188 +allow:
54189 + read_unlock(&tasklist_lock);
54190 + rcu_read_unlock();
54191 +#endif
54192 + return 1;
54193 +}
54194 +
54195 +void
54196 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54197 +{
54198 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54199 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54200 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54201 +#endif
54202 + return;
54203 +}
54204 +
54205 +int
54206 +gr_handle_chroot_mknod(const struct dentry *dentry,
54207 + const struct vfsmount *mnt, const int mode)
54208 +{
54209 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54210 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54211 + proc_is_chrooted(current)) {
54212 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54213 + return -EPERM;
54214 + }
54215 +#endif
54216 + return 0;
54217 +}
54218 +
54219 +int
54220 +gr_handle_chroot_mount(const struct dentry *dentry,
54221 + const struct vfsmount *mnt, const char *dev_name)
54222 +{
54223 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54224 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54225 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54226 + return -EPERM;
54227 + }
54228 +#endif
54229 + return 0;
54230 +}
54231 +
54232 +int
54233 +gr_handle_chroot_pivot(void)
54234 +{
54235 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54236 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54237 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54238 + return -EPERM;
54239 + }
54240 +#endif
54241 + return 0;
54242 +}
54243 +
54244 +int
54245 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54246 +{
54247 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54248 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54249 + !gr_is_outside_chroot(dentry, mnt)) {
54250 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54251 + return -EPERM;
54252 + }
54253 +#endif
54254 + return 0;
54255 +}
54256 +
54257 +extern const char *captab_log[];
54258 +extern int captab_log_entries;
54259 +
54260 +int
54261 +gr_chroot_is_capable(const int cap)
54262 +{
54263 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54264 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54265 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54266 + if (cap_raised(chroot_caps, cap)) {
54267 + const struct cred *creds = current_cred();
54268 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54269 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54270 + }
54271 + return 0;
54272 + }
54273 + }
54274 +#endif
54275 + return 1;
54276 +}
54277 +
54278 +int
54279 +gr_chroot_is_capable_nolog(const int cap)
54280 +{
54281 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54282 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54283 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54284 + if (cap_raised(chroot_caps, cap)) {
54285 + return 0;
54286 + }
54287 + }
54288 +#endif
54289 + return 1;
54290 +}
54291 +
54292 +int
54293 +gr_handle_chroot_sysctl(const int op)
54294 +{
54295 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54296 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54297 + proc_is_chrooted(current))
54298 + return -EACCES;
54299 +#endif
54300 + return 0;
54301 +}
54302 +
54303 +void
54304 +gr_handle_chroot_chdir(struct path *path)
54305 +{
54306 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54307 + if (grsec_enable_chroot_chdir)
54308 + set_fs_pwd(current->fs, path);
54309 +#endif
54310 + return;
54311 +}
54312 +
54313 +int
54314 +gr_handle_chroot_chmod(const struct dentry *dentry,
54315 + const struct vfsmount *mnt, const int mode)
54316 +{
54317 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54318 + /* allow chmod +s on directories, but not files */
54319 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54320 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54321 + proc_is_chrooted(current)) {
54322 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54323 + return -EPERM;
54324 + }
54325 +#endif
54326 + return 0;
54327 +}
54328 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54329 new file mode 100644
54330 index 0000000..d81a586
54331 --- /dev/null
54332 +++ b/grsecurity/grsec_disabled.c
54333 @@ -0,0 +1,439 @@
54334 +#include <linux/kernel.h>
54335 +#include <linux/module.h>
54336 +#include <linux/sched.h>
54337 +#include <linux/file.h>
54338 +#include <linux/fs.h>
54339 +#include <linux/kdev_t.h>
54340 +#include <linux/net.h>
54341 +#include <linux/in.h>
54342 +#include <linux/ip.h>
54343 +#include <linux/skbuff.h>
54344 +#include <linux/sysctl.h>
54345 +
54346 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54347 +void
54348 +pax_set_initial_flags(struct linux_binprm *bprm)
54349 +{
54350 + return;
54351 +}
54352 +#endif
54353 +
54354 +#ifdef CONFIG_SYSCTL
54355 +__u32
54356 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54357 +{
54358 + return 0;
54359 +}
54360 +#endif
54361 +
54362 +#ifdef CONFIG_TASKSTATS
54363 +int gr_is_taskstats_denied(int pid)
54364 +{
54365 + return 0;
54366 +}
54367 +#endif
54368 +
54369 +int
54370 +gr_acl_is_enabled(void)
54371 +{
54372 + return 0;
54373 +}
54374 +
54375 +void
54376 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54377 +{
54378 + return;
54379 +}
54380 +
54381 +int
54382 +gr_handle_rawio(const struct inode *inode)
54383 +{
54384 + return 0;
54385 +}
54386 +
54387 +void
54388 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54389 +{
54390 + return;
54391 +}
54392 +
54393 +int
54394 +gr_handle_ptrace(struct task_struct *task, const long request)
54395 +{
54396 + return 0;
54397 +}
54398 +
54399 +int
54400 +gr_handle_proc_ptrace(struct task_struct *task)
54401 +{
54402 + return 0;
54403 +}
54404 +
54405 +void
54406 +gr_learn_resource(const struct task_struct *task,
54407 + const int res, const unsigned long wanted, const int gt)
54408 +{
54409 + return;
54410 +}
54411 +
54412 +int
54413 +gr_set_acls(const int type)
54414 +{
54415 + return 0;
54416 +}
54417 +
54418 +int
54419 +gr_check_hidden_task(const struct task_struct *tsk)
54420 +{
54421 + return 0;
54422 +}
54423 +
54424 +int
54425 +gr_check_protected_task(const struct task_struct *task)
54426 +{
54427 + return 0;
54428 +}
54429 +
54430 +int
54431 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54432 +{
54433 + return 0;
54434 +}
54435 +
54436 +void
54437 +gr_copy_label(struct task_struct *tsk)
54438 +{
54439 + return;
54440 +}
54441 +
54442 +void
54443 +gr_set_pax_flags(struct task_struct *task)
54444 +{
54445 + return;
54446 +}
54447 +
54448 +int
54449 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54450 + const int unsafe_share)
54451 +{
54452 + return 0;
54453 +}
54454 +
54455 +void
54456 +gr_handle_delete(const ino_t ino, const dev_t dev)
54457 +{
54458 + return;
54459 +}
54460 +
54461 +void
54462 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54463 +{
54464 + return;
54465 +}
54466 +
54467 +void
54468 +gr_handle_crash(struct task_struct *task, const int sig)
54469 +{
54470 + return;
54471 +}
54472 +
54473 +int
54474 +gr_check_crash_exec(const struct file *filp)
54475 +{
54476 + return 0;
54477 +}
54478 +
54479 +int
54480 +gr_check_crash_uid(const uid_t uid)
54481 +{
54482 + return 0;
54483 +}
54484 +
54485 +void
54486 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54487 + struct dentry *old_dentry,
54488 + struct dentry *new_dentry,
54489 + struct vfsmount *mnt, const __u8 replace)
54490 +{
54491 + return;
54492 +}
54493 +
54494 +int
54495 +gr_search_socket(const int family, const int type, const int protocol)
54496 +{
54497 + return 1;
54498 +}
54499 +
54500 +int
54501 +gr_search_connectbind(const int mode, const struct socket *sock,
54502 + const struct sockaddr_in *addr)
54503 +{
54504 + return 0;
54505 +}
54506 +
54507 +void
54508 +gr_handle_alertkill(struct task_struct *task)
54509 +{
54510 + return;
54511 +}
54512 +
54513 +__u32
54514 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54515 +{
54516 + return 1;
54517 +}
54518 +
54519 +__u32
54520 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54521 + const struct vfsmount * mnt)
54522 +{
54523 + return 1;
54524 +}
54525 +
54526 +__u32
54527 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54528 + int acc_mode)
54529 +{
54530 + return 1;
54531 +}
54532 +
54533 +__u32
54534 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54535 +{
54536 + return 1;
54537 +}
54538 +
54539 +__u32
54540 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54541 +{
54542 + return 1;
54543 +}
54544 +
54545 +int
54546 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54547 + unsigned int *vm_flags)
54548 +{
54549 + return 1;
54550 +}
54551 +
54552 +__u32
54553 +gr_acl_handle_truncate(const struct dentry * dentry,
54554 + const struct vfsmount * mnt)
54555 +{
54556 + return 1;
54557 +}
54558 +
54559 +__u32
54560 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54561 +{
54562 + return 1;
54563 +}
54564 +
54565 +__u32
54566 +gr_acl_handle_access(const struct dentry * dentry,
54567 + const struct vfsmount * mnt, const int fmode)
54568 +{
54569 + return 1;
54570 +}
54571 +
54572 +__u32
54573 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54574 + mode_t mode)
54575 +{
54576 + return 1;
54577 +}
54578 +
54579 +__u32
54580 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54581 + mode_t mode)
54582 +{
54583 + return 1;
54584 +}
54585 +
54586 +__u32
54587 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54588 +{
54589 + return 1;
54590 +}
54591 +
54592 +__u32
54593 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54594 +{
54595 + return 1;
54596 +}
54597 +
54598 +void
54599 +grsecurity_init(void)
54600 +{
54601 + return;
54602 +}
54603 +
54604 +__u32
54605 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54606 + const struct dentry * parent_dentry,
54607 + const struct vfsmount * parent_mnt,
54608 + const int mode)
54609 +{
54610 + return 1;
54611 +}
54612 +
54613 +__u32
54614 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54615 + const struct dentry * parent_dentry,
54616 + const struct vfsmount * parent_mnt)
54617 +{
54618 + return 1;
54619 +}
54620 +
54621 +__u32
54622 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54623 + const struct dentry * parent_dentry,
54624 + const struct vfsmount * parent_mnt, const char *from)
54625 +{
54626 + return 1;
54627 +}
54628 +
54629 +__u32
54630 +gr_acl_handle_link(const struct dentry * new_dentry,
54631 + const struct dentry * parent_dentry,
54632 + const struct vfsmount * parent_mnt,
54633 + const struct dentry * old_dentry,
54634 + const struct vfsmount * old_mnt, const char *to)
54635 +{
54636 + return 1;
54637 +}
54638 +
54639 +int
54640 +gr_acl_handle_rename(const struct dentry *new_dentry,
54641 + const struct dentry *parent_dentry,
54642 + const struct vfsmount *parent_mnt,
54643 + const struct dentry *old_dentry,
54644 + const struct inode *old_parent_inode,
54645 + const struct vfsmount *old_mnt, const char *newname)
54646 +{
54647 + return 0;
54648 +}
54649 +
54650 +int
54651 +gr_acl_handle_filldir(const struct file *file, const char *name,
54652 + const int namelen, const ino_t ino)
54653 +{
54654 + return 1;
54655 +}
54656 +
54657 +int
54658 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54659 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54660 +{
54661 + return 1;
54662 +}
54663 +
54664 +int
54665 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54666 +{
54667 + return 0;
54668 +}
54669 +
54670 +int
54671 +gr_search_accept(const struct socket *sock)
54672 +{
54673 + return 0;
54674 +}
54675 +
54676 +int
54677 +gr_search_listen(const struct socket *sock)
54678 +{
54679 + return 0;
54680 +}
54681 +
54682 +int
54683 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54684 +{
54685 + return 0;
54686 +}
54687 +
54688 +__u32
54689 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54690 +{
54691 + return 1;
54692 +}
54693 +
54694 +__u32
54695 +gr_acl_handle_creat(const struct dentry * dentry,
54696 + const struct dentry * p_dentry,
54697 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54698 + const int imode)
54699 +{
54700 + return 1;
54701 +}
54702 +
54703 +void
54704 +gr_acl_handle_exit(void)
54705 +{
54706 + return;
54707 +}
54708 +
54709 +int
54710 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54711 +{
54712 + return 1;
54713 +}
54714 +
54715 +void
54716 +gr_set_role_label(const uid_t uid, const gid_t gid)
54717 +{
54718 + return;
54719 +}
54720 +
54721 +int
54722 +gr_acl_handle_procpidmem(const struct task_struct *task)
54723 +{
54724 + return 0;
54725 +}
54726 +
54727 +int
54728 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54729 +{
54730 + return 0;
54731 +}
54732 +
54733 +int
54734 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54735 +{
54736 + return 0;
54737 +}
54738 +
54739 +void
54740 +gr_set_kernel_label(struct task_struct *task)
54741 +{
54742 + return;
54743 +}
54744 +
54745 +int
54746 +gr_check_user_change(int real, int effective, int fs)
54747 +{
54748 + return 0;
54749 +}
54750 +
54751 +int
54752 +gr_check_group_change(int real, int effective, int fs)
54753 +{
54754 + return 0;
54755 +}
54756 +
54757 +int gr_acl_enable_at_secure(void)
54758 +{
54759 + return 0;
54760 +}
54761 +
54762 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54763 +{
54764 + return dentry->d_inode->i_sb->s_dev;
54765 +}
54766 +
54767 +EXPORT_SYMBOL(gr_learn_resource);
54768 +EXPORT_SYMBOL(gr_set_kernel_label);
54769 +#ifdef CONFIG_SECURITY
54770 +EXPORT_SYMBOL(gr_check_user_change);
54771 +EXPORT_SYMBOL(gr_check_group_change);
54772 +#endif
54773 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54774 new file mode 100644
54775 index 0000000..2b05ada
54776 --- /dev/null
54777 +++ b/grsecurity/grsec_exec.c
54778 @@ -0,0 +1,146 @@
54779 +#include <linux/kernel.h>
54780 +#include <linux/sched.h>
54781 +#include <linux/file.h>
54782 +#include <linux/binfmts.h>
54783 +#include <linux/fs.h>
54784 +#include <linux/types.h>
54785 +#include <linux/grdefs.h>
54786 +#include <linux/grsecurity.h>
54787 +#include <linux/grinternal.h>
54788 +#include <linux/capability.h>
54789 +#include <linux/module.h>
54790 +
54791 +#include <asm/uaccess.h>
54792 +
54793 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54794 +static char gr_exec_arg_buf[132];
54795 +static DEFINE_MUTEX(gr_exec_arg_mutex);
54796 +#endif
54797 +
54798 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54799 +
54800 +void
54801 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54802 +{
54803 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54804 + char *grarg = gr_exec_arg_buf;
54805 + unsigned int i, x, execlen = 0;
54806 + char c;
54807 +
54808 + if (!((grsec_enable_execlog && grsec_enable_group &&
54809 + in_group_p(grsec_audit_gid))
54810 + || (grsec_enable_execlog && !grsec_enable_group)))
54811 + return;
54812 +
54813 + mutex_lock(&gr_exec_arg_mutex);
54814 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
54815 +
54816 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
54817 + const char __user *p;
54818 + unsigned int len;
54819 +
54820 + p = get_user_arg_ptr(argv, i);
54821 + if (IS_ERR(p))
54822 + goto log;
54823 +
54824 + len = strnlen_user(p, 128 - execlen);
54825 + if (len > 128 - execlen)
54826 + len = 128 - execlen;
54827 + else if (len > 0)
54828 + len--;
54829 + if (copy_from_user(grarg + execlen, p, len))
54830 + goto log;
54831 +
54832 + /* rewrite unprintable characters */
54833 + for (x = 0; x < len; x++) {
54834 + c = *(grarg + execlen + x);
54835 + if (c < 32 || c > 126)
54836 + *(grarg + execlen + x) = ' ';
54837 + }
54838 +
54839 + execlen += len;
54840 + *(grarg + execlen) = ' ';
54841 + *(grarg + execlen + 1) = '\0';
54842 + execlen++;
54843 + }
54844 +
54845 + log:
54846 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54847 + bprm->file->f_path.mnt, grarg);
54848 + mutex_unlock(&gr_exec_arg_mutex);
54849 +#endif
54850 + return;
54851 +}
54852 +
54853 +#ifdef CONFIG_GRKERNSEC
54854 +extern int gr_acl_is_capable(const int cap);
54855 +extern int gr_acl_is_capable_nolog(const int cap);
54856 +extern int gr_chroot_is_capable(const int cap);
54857 +extern int gr_chroot_is_capable_nolog(const int cap);
54858 +#endif
54859 +
54860 +const char *captab_log[] = {
54861 + "CAP_CHOWN",
54862 + "CAP_DAC_OVERRIDE",
54863 + "CAP_DAC_READ_SEARCH",
54864 + "CAP_FOWNER",
54865 + "CAP_FSETID",
54866 + "CAP_KILL",
54867 + "CAP_SETGID",
54868 + "CAP_SETUID",
54869 + "CAP_SETPCAP",
54870 + "CAP_LINUX_IMMUTABLE",
54871 + "CAP_NET_BIND_SERVICE",
54872 + "CAP_NET_BROADCAST",
54873 + "CAP_NET_ADMIN",
54874 + "CAP_NET_RAW",
54875 + "CAP_IPC_LOCK",
54876 + "CAP_IPC_OWNER",
54877 + "CAP_SYS_MODULE",
54878 + "CAP_SYS_RAWIO",
54879 + "CAP_SYS_CHROOT",
54880 + "CAP_SYS_PTRACE",
54881 + "CAP_SYS_PACCT",
54882 + "CAP_SYS_ADMIN",
54883 + "CAP_SYS_BOOT",
54884 + "CAP_SYS_NICE",
54885 + "CAP_SYS_RESOURCE",
54886 + "CAP_SYS_TIME",
54887 + "CAP_SYS_TTY_CONFIG",
54888 + "CAP_MKNOD",
54889 + "CAP_LEASE",
54890 + "CAP_AUDIT_WRITE",
54891 + "CAP_AUDIT_CONTROL",
54892 + "CAP_SETFCAP",
54893 + "CAP_MAC_OVERRIDE",
54894 + "CAP_MAC_ADMIN",
54895 + "CAP_SYSLOG",
54896 + "CAP_WAKE_ALARM"
54897 +};
54898 +
54899 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54900 +
54901 +int gr_is_capable(const int cap)
54902 +{
54903 +#ifdef CONFIG_GRKERNSEC
54904 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54905 + return 1;
54906 + return 0;
54907 +#else
54908 + return 1;
54909 +#endif
54910 +}
54911 +
54912 +int gr_is_capable_nolog(const int cap)
54913 +{
54914 +#ifdef CONFIG_GRKERNSEC
54915 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54916 + return 1;
54917 + return 0;
54918 +#else
54919 + return 1;
54920 +#endif
54921 +}
54922 +
54923 +EXPORT_SYMBOL(gr_is_capable);
54924 +EXPORT_SYMBOL(gr_is_capable_nolog);
54925 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
54926 new file mode 100644
54927 index 0000000..d3ee748
54928 --- /dev/null
54929 +++ b/grsecurity/grsec_fifo.c
54930 @@ -0,0 +1,24 @@
54931 +#include <linux/kernel.h>
54932 +#include <linux/sched.h>
54933 +#include <linux/fs.h>
54934 +#include <linux/file.h>
54935 +#include <linux/grinternal.h>
54936 +
54937 +int
54938 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54939 + const struct dentry *dir, const int flag, const int acc_mode)
54940 +{
54941 +#ifdef CONFIG_GRKERNSEC_FIFO
54942 + const struct cred *cred = current_cred();
54943 +
54944 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54945 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54946 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54947 + (cred->fsuid != dentry->d_inode->i_uid)) {
54948 + if (!inode_permission(dentry->d_inode, acc_mode))
54949 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54950 + return -EACCES;
54951 + }
54952 +#endif
54953 + return 0;
54954 +}
54955 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
54956 new file mode 100644
54957 index 0000000..8ca18bf
54958 --- /dev/null
54959 +++ b/grsecurity/grsec_fork.c
54960 @@ -0,0 +1,23 @@
54961 +#include <linux/kernel.h>
54962 +#include <linux/sched.h>
54963 +#include <linux/grsecurity.h>
54964 +#include <linux/grinternal.h>
54965 +#include <linux/errno.h>
54966 +
54967 +void
54968 +gr_log_forkfail(const int retval)
54969 +{
54970 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54971 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54972 + switch (retval) {
54973 + case -EAGAIN:
54974 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54975 + break;
54976 + case -ENOMEM:
54977 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54978 + break;
54979 + }
54980 + }
54981 +#endif
54982 + return;
54983 +}
54984 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
54985 new file mode 100644
54986 index 0000000..01ddde4
54987 --- /dev/null
54988 +++ b/grsecurity/grsec_init.c
54989 @@ -0,0 +1,277 @@
54990 +#include <linux/kernel.h>
54991 +#include <linux/sched.h>
54992 +#include <linux/mm.h>
54993 +#include <linux/gracl.h>
54994 +#include <linux/slab.h>
54995 +#include <linux/vmalloc.h>
54996 +#include <linux/percpu.h>
54997 +#include <linux/module.h>
54998 +
54999 +int grsec_enable_ptrace_readexec;
55000 +int grsec_enable_setxid;
55001 +int grsec_enable_brute;
55002 +int grsec_enable_link;
55003 +int grsec_enable_dmesg;
55004 +int grsec_enable_harden_ptrace;
55005 +int grsec_enable_fifo;
55006 +int grsec_enable_execlog;
55007 +int grsec_enable_signal;
55008 +int grsec_enable_forkfail;
55009 +int grsec_enable_audit_ptrace;
55010 +int grsec_enable_time;
55011 +int grsec_enable_audit_textrel;
55012 +int grsec_enable_group;
55013 +int grsec_audit_gid;
55014 +int grsec_enable_chdir;
55015 +int grsec_enable_mount;
55016 +int grsec_enable_rofs;
55017 +int grsec_enable_chroot_findtask;
55018 +int grsec_enable_chroot_mount;
55019 +int grsec_enable_chroot_shmat;
55020 +int grsec_enable_chroot_fchdir;
55021 +int grsec_enable_chroot_double;
55022 +int grsec_enable_chroot_pivot;
55023 +int grsec_enable_chroot_chdir;
55024 +int grsec_enable_chroot_chmod;
55025 +int grsec_enable_chroot_mknod;
55026 +int grsec_enable_chroot_nice;
55027 +int grsec_enable_chroot_execlog;
55028 +int grsec_enable_chroot_caps;
55029 +int grsec_enable_chroot_sysctl;
55030 +int grsec_enable_chroot_unix;
55031 +int grsec_enable_tpe;
55032 +int grsec_tpe_gid;
55033 +int grsec_enable_blackhole;
55034 +#ifdef CONFIG_IPV6_MODULE
55035 +EXPORT_SYMBOL(grsec_enable_blackhole);
55036 +#endif
55037 +int grsec_lastack_retries;
55038 +int grsec_enable_tpe_all;
55039 +int grsec_enable_tpe_invert;
55040 +int grsec_enable_socket_all;
55041 +int grsec_socket_all_gid;
55042 +int grsec_enable_socket_client;
55043 +int grsec_socket_client_gid;
55044 +int grsec_enable_socket_server;
55045 +int grsec_socket_server_gid;
55046 +int grsec_resource_logging;
55047 +int grsec_disable_privio;
55048 +int grsec_enable_log_rwxmaps;
55049 +int grsec_lock;
55050 +
55051 +DEFINE_SPINLOCK(grsec_alert_lock);
55052 +unsigned long grsec_alert_wtime = 0;
55053 +unsigned long grsec_alert_fyet = 0;
55054 +
55055 +DEFINE_SPINLOCK(grsec_audit_lock);
55056 +
55057 +DEFINE_RWLOCK(grsec_exec_file_lock);
55058 +
55059 +char *gr_shared_page[4];
55060 +
55061 +char *gr_alert_log_fmt;
55062 +char *gr_audit_log_fmt;
55063 +char *gr_alert_log_buf;
55064 +char *gr_audit_log_buf;
55065 +
55066 +extern struct gr_arg *gr_usermode;
55067 +extern unsigned char *gr_system_salt;
55068 +extern unsigned char *gr_system_sum;
55069 +
55070 +void __init
55071 +grsecurity_init(void)
55072 +{
55073 + int j;
55074 + /* create the per-cpu shared pages */
55075 +
55076 +#ifdef CONFIG_X86
55077 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55078 +#endif
55079 +
55080 + for (j = 0; j < 4; j++) {
55081 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55082 + if (gr_shared_page[j] == NULL) {
55083 + panic("Unable to allocate grsecurity shared page");
55084 + return;
55085 + }
55086 + }
55087 +
55088 + /* allocate log buffers */
55089 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55090 + if (!gr_alert_log_fmt) {
55091 + panic("Unable to allocate grsecurity alert log format buffer");
55092 + return;
55093 + }
55094 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55095 + if (!gr_audit_log_fmt) {
55096 + panic("Unable to allocate grsecurity audit log format buffer");
55097 + return;
55098 + }
55099 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55100 + if (!gr_alert_log_buf) {
55101 + panic("Unable to allocate grsecurity alert log buffer");
55102 + return;
55103 + }
55104 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55105 + if (!gr_audit_log_buf) {
55106 + panic("Unable to allocate grsecurity audit log buffer");
55107 + return;
55108 + }
55109 +
55110 + /* allocate memory for authentication structure */
55111 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55112 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55113 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55114 +
55115 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55116 + panic("Unable to allocate grsecurity authentication structure");
55117 + return;
55118 + }
55119 +
55120 +
55121 +#ifdef CONFIG_GRKERNSEC_IO
55122 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55123 + grsec_disable_privio = 1;
55124 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55125 + grsec_disable_privio = 1;
55126 +#else
55127 + grsec_disable_privio = 0;
55128 +#endif
55129 +#endif
55130 +
55131 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55132 + /* for backward compatibility, tpe_invert always defaults to on if
55133 + enabled in the kernel
55134 + */
55135 + grsec_enable_tpe_invert = 1;
55136 +#endif
55137 +
55138 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55139 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55140 + grsec_lock = 1;
55141 +#endif
55142 +
55143 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55144 + grsec_enable_audit_textrel = 1;
55145 +#endif
55146 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55147 + grsec_enable_log_rwxmaps = 1;
55148 +#endif
55149 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55150 + grsec_enable_group = 1;
55151 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55152 +#endif
55153 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55154 + grsec_enable_ptrace_readexec = 1;
55155 +#endif
55156 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55157 + grsec_enable_chdir = 1;
55158 +#endif
55159 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55160 + grsec_enable_harden_ptrace = 1;
55161 +#endif
55162 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55163 + grsec_enable_mount = 1;
55164 +#endif
55165 +#ifdef CONFIG_GRKERNSEC_LINK
55166 + grsec_enable_link = 1;
55167 +#endif
55168 +#ifdef CONFIG_GRKERNSEC_BRUTE
55169 + grsec_enable_brute = 1;
55170 +#endif
55171 +#ifdef CONFIG_GRKERNSEC_DMESG
55172 + grsec_enable_dmesg = 1;
55173 +#endif
55174 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55175 + grsec_enable_blackhole = 1;
55176 + grsec_lastack_retries = 4;
55177 +#endif
55178 +#ifdef CONFIG_GRKERNSEC_FIFO
55179 + grsec_enable_fifo = 1;
55180 +#endif
55181 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55182 + grsec_enable_execlog = 1;
55183 +#endif
55184 +#ifdef CONFIG_GRKERNSEC_SETXID
55185 + grsec_enable_setxid = 1;
55186 +#endif
55187 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55188 + grsec_enable_signal = 1;
55189 +#endif
55190 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55191 + grsec_enable_forkfail = 1;
55192 +#endif
55193 +#ifdef CONFIG_GRKERNSEC_TIME
55194 + grsec_enable_time = 1;
55195 +#endif
55196 +#ifdef CONFIG_GRKERNSEC_RESLOG
55197 + grsec_resource_logging = 1;
55198 +#endif
55199 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55200 + grsec_enable_chroot_findtask = 1;
55201 +#endif
55202 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55203 + grsec_enable_chroot_unix = 1;
55204 +#endif
55205 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55206 + grsec_enable_chroot_mount = 1;
55207 +#endif
55208 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55209 + grsec_enable_chroot_fchdir = 1;
55210 +#endif
55211 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55212 + grsec_enable_chroot_shmat = 1;
55213 +#endif
55214 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55215 + grsec_enable_audit_ptrace = 1;
55216 +#endif
55217 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55218 + grsec_enable_chroot_double = 1;
55219 +#endif
55220 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55221 + grsec_enable_chroot_pivot = 1;
55222 +#endif
55223 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55224 + grsec_enable_chroot_chdir = 1;
55225 +#endif
55226 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55227 + grsec_enable_chroot_chmod = 1;
55228 +#endif
55229 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55230 + grsec_enable_chroot_mknod = 1;
55231 +#endif
55232 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55233 + grsec_enable_chroot_nice = 1;
55234 +#endif
55235 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55236 + grsec_enable_chroot_execlog = 1;
55237 +#endif
55238 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55239 + grsec_enable_chroot_caps = 1;
55240 +#endif
55241 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55242 + grsec_enable_chroot_sysctl = 1;
55243 +#endif
55244 +#ifdef CONFIG_GRKERNSEC_TPE
55245 + grsec_enable_tpe = 1;
55246 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55247 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55248 + grsec_enable_tpe_all = 1;
55249 +#endif
55250 +#endif
55251 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55252 + grsec_enable_socket_all = 1;
55253 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55254 +#endif
55255 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55256 + grsec_enable_socket_client = 1;
55257 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55258 +#endif
55259 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55260 + grsec_enable_socket_server = 1;
55261 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55262 +#endif
55263 +#endif
55264 +
55265 + return;
55266 +}
55267 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55268 new file mode 100644
55269 index 0000000..3efe141
55270 --- /dev/null
55271 +++ b/grsecurity/grsec_link.c
55272 @@ -0,0 +1,43 @@
55273 +#include <linux/kernel.h>
55274 +#include <linux/sched.h>
55275 +#include <linux/fs.h>
55276 +#include <linux/file.h>
55277 +#include <linux/grinternal.h>
55278 +
55279 +int
55280 +gr_handle_follow_link(const struct inode *parent,
55281 + const struct inode *inode,
55282 + const struct dentry *dentry, const struct vfsmount *mnt)
55283 +{
55284 +#ifdef CONFIG_GRKERNSEC_LINK
55285 + const struct cred *cred = current_cred();
55286 +
55287 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55288 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55289 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55290 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55291 + return -EACCES;
55292 + }
55293 +#endif
55294 + return 0;
55295 +}
55296 +
55297 +int
55298 +gr_handle_hardlink(const struct dentry *dentry,
55299 + const struct vfsmount *mnt,
55300 + struct inode *inode, const int mode, const char *to)
55301 +{
55302 +#ifdef CONFIG_GRKERNSEC_LINK
55303 + const struct cred *cred = current_cred();
55304 +
55305 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55306 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55307 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55308 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55309 + !capable(CAP_FOWNER) && cred->uid) {
55310 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55311 + return -EPERM;
55312 + }
55313 +#endif
55314 + return 0;
55315 +}
55316 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55317 new file mode 100644
55318 index 0000000..a45d2e9
55319 --- /dev/null
55320 +++ b/grsecurity/grsec_log.c
55321 @@ -0,0 +1,322 @@
55322 +#include <linux/kernel.h>
55323 +#include <linux/sched.h>
55324 +#include <linux/file.h>
55325 +#include <linux/tty.h>
55326 +#include <linux/fs.h>
55327 +#include <linux/grinternal.h>
55328 +
55329 +#ifdef CONFIG_TREE_PREEMPT_RCU
55330 +#define DISABLE_PREEMPT() preempt_disable()
55331 +#define ENABLE_PREEMPT() preempt_enable()
55332 +#else
55333 +#define DISABLE_PREEMPT()
55334 +#define ENABLE_PREEMPT()
55335 +#endif
55336 +
55337 +#define BEGIN_LOCKS(x) \
55338 + DISABLE_PREEMPT(); \
55339 + rcu_read_lock(); \
55340 + read_lock(&tasklist_lock); \
55341 + read_lock(&grsec_exec_file_lock); \
55342 + if (x != GR_DO_AUDIT) \
55343 + spin_lock(&grsec_alert_lock); \
55344 + else \
55345 + spin_lock(&grsec_audit_lock)
55346 +
55347 +#define END_LOCKS(x) \
55348 + if (x != GR_DO_AUDIT) \
55349 + spin_unlock(&grsec_alert_lock); \
55350 + else \
55351 + spin_unlock(&grsec_audit_lock); \
55352 + read_unlock(&grsec_exec_file_lock); \
55353 + read_unlock(&tasklist_lock); \
55354 + rcu_read_unlock(); \
55355 + ENABLE_PREEMPT(); \
55356 + if (x == GR_DONT_AUDIT) \
55357 + gr_handle_alertkill(current)
55358 +
55359 +enum {
55360 + FLOODING,
55361 + NO_FLOODING
55362 +};
55363 +
55364 +extern char *gr_alert_log_fmt;
55365 +extern char *gr_audit_log_fmt;
55366 +extern char *gr_alert_log_buf;
55367 +extern char *gr_audit_log_buf;
55368 +
55369 +static int gr_log_start(int audit)
55370 +{
55371 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55372 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55373 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55374 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55375 + unsigned long curr_secs = get_seconds();
55376 +
55377 + if (audit == GR_DO_AUDIT)
55378 + goto set_fmt;
55379 +
55380 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55381 + grsec_alert_wtime = curr_secs;
55382 + grsec_alert_fyet = 0;
55383 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55384 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55385 + grsec_alert_fyet++;
55386 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55387 + grsec_alert_wtime = curr_secs;
55388 + grsec_alert_fyet++;
55389 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55390 + return FLOODING;
55391 + }
55392 + else return FLOODING;
55393 +
55394 +set_fmt:
55395 +#endif
55396 + memset(buf, 0, PAGE_SIZE);
55397 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55398 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55399 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55400 + } else if (current->signal->curr_ip) {
55401 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55402 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55403 + } else if (gr_acl_is_enabled()) {
55404 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55405 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55406 + } else {
55407 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55408 + strcpy(buf, fmt);
55409 + }
55410 +
55411 + return NO_FLOODING;
55412 +}
55413 +
55414 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55415 + __attribute__ ((format (printf, 2, 0)));
55416 +
55417 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55418 +{
55419 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55420 + unsigned int len = strlen(buf);
55421 +
55422 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55423 +
55424 + return;
55425 +}
55426 +
55427 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55428 + __attribute__ ((format (printf, 2, 3)));
55429 +
55430 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55431 +{
55432 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55433 + unsigned int len = strlen(buf);
55434 + va_list ap;
55435 +
55436 + va_start(ap, msg);
55437 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55438 + va_end(ap);
55439 +
55440 + return;
55441 +}
55442 +
55443 +static void gr_log_end(int audit, int append_default)
55444 +{
55445 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55446 +
55447 + if (append_default) {
55448 + unsigned int len = strlen(buf);
55449 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55450 + }
55451 +
55452 + printk("%s\n", buf);
55453 +
55454 + return;
55455 +}
55456 +
55457 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55458 +{
55459 + int logtype;
55460 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55461 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55462 + void *voidptr = NULL;
55463 + int num1 = 0, num2 = 0;
55464 + unsigned long ulong1 = 0, ulong2 = 0;
55465 + struct dentry *dentry = NULL;
55466 + struct vfsmount *mnt = NULL;
55467 + struct file *file = NULL;
55468 + struct task_struct *task = NULL;
55469 + const struct cred *cred, *pcred;
55470 + va_list ap;
55471 +
55472 + BEGIN_LOCKS(audit);
55473 + logtype = gr_log_start(audit);
55474 + if (logtype == FLOODING) {
55475 + END_LOCKS(audit);
55476 + return;
55477 + }
55478 + va_start(ap, argtypes);
55479 + switch (argtypes) {
55480 + case GR_TTYSNIFF:
55481 + task = va_arg(ap, struct task_struct *);
55482 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55483 + break;
55484 + case GR_SYSCTL_HIDDEN:
55485 + str1 = va_arg(ap, char *);
55486 + gr_log_middle_varargs(audit, msg, result, str1);
55487 + break;
55488 + case GR_RBAC:
55489 + dentry = va_arg(ap, struct dentry *);
55490 + mnt = va_arg(ap, struct vfsmount *);
55491 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55492 + break;
55493 + case GR_RBAC_STR:
55494 + dentry = va_arg(ap, struct dentry *);
55495 + mnt = va_arg(ap, struct vfsmount *);
55496 + str1 = va_arg(ap, char *);
55497 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55498 + break;
55499 + case GR_STR_RBAC:
55500 + str1 = va_arg(ap, char *);
55501 + dentry = va_arg(ap, struct dentry *);
55502 + mnt = va_arg(ap, struct vfsmount *);
55503 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55504 + break;
55505 + case GR_RBAC_MODE2:
55506 + dentry = va_arg(ap, struct dentry *);
55507 + mnt = va_arg(ap, struct vfsmount *);
55508 + str1 = va_arg(ap, char *);
55509 + str2 = va_arg(ap, char *);
55510 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55511 + break;
55512 + case GR_RBAC_MODE3:
55513 + dentry = va_arg(ap, struct dentry *);
55514 + mnt = va_arg(ap, struct vfsmount *);
55515 + str1 = va_arg(ap, char *);
55516 + str2 = va_arg(ap, char *);
55517 + str3 = va_arg(ap, char *);
55518 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55519 + break;
55520 + case GR_FILENAME:
55521 + dentry = va_arg(ap, struct dentry *);
55522 + mnt = va_arg(ap, struct vfsmount *);
55523 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55524 + break;
55525 + case GR_STR_FILENAME:
55526 + str1 = va_arg(ap, char *);
55527 + dentry = va_arg(ap, struct dentry *);
55528 + mnt = va_arg(ap, struct vfsmount *);
55529 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55530 + break;
55531 + case GR_FILENAME_STR:
55532 + dentry = va_arg(ap, struct dentry *);
55533 + mnt = va_arg(ap, struct vfsmount *);
55534 + str1 = va_arg(ap, char *);
55535 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55536 + break;
55537 + case GR_FILENAME_TWO_INT:
55538 + dentry = va_arg(ap, struct dentry *);
55539 + mnt = va_arg(ap, struct vfsmount *);
55540 + num1 = va_arg(ap, int);
55541 + num2 = va_arg(ap, int);
55542 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55543 + break;
55544 + case GR_FILENAME_TWO_INT_STR:
55545 + dentry = va_arg(ap, struct dentry *);
55546 + mnt = va_arg(ap, struct vfsmount *);
55547 + num1 = va_arg(ap, int);
55548 + num2 = va_arg(ap, int);
55549 + str1 = va_arg(ap, char *);
55550 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55551 + break;
55552 + case GR_TEXTREL:
55553 + file = va_arg(ap, struct file *);
55554 + ulong1 = va_arg(ap, unsigned long);
55555 + ulong2 = va_arg(ap, unsigned long);
55556 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55557 + break;
55558 + case GR_PTRACE:
55559 + task = va_arg(ap, struct task_struct *);
55560 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55561 + break;
55562 + case GR_RESOURCE:
55563 + task = va_arg(ap, struct task_struct *);
55564 + cred = __task_cred(task);
55565 + pcred = __task_cred(task->real_parent);
55566 + ulong1 = va_arg(ap, unsigned long);
55567 + str1 = va_arg(ap, char *);
55568 + ulong2 = va_arg(ap, unsigned long);
55569 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55570 + break;
55571 + case GR_CAP:
55572 + task = va_arg(ap, struct task_struct *);
55573 + cred = __task_cred(task);
55574 + pcred = __task_cred(task->real_parent);
55575 + str1 = va_arg(ap, char *);
55576 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55577 + break;
55578 + case GR_SIG:
55579 + str1 = va_arg(ap, char *);
55580 + voidptr = va_arg(ap, void *);
55581 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55582 + break;
55583 + case GR_SIG2:
55584 + task = va_arg(ap, struct task_struct *);
55585 + cred = __task_cred(task);
55586 + pcred = __task_cred(task->real_parent);
55587 + num1 = va_arg(ap, int);
55588 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55589 + break;
55590 + case GR_CRASH1:
55591 + task = va_arg(ap, struct task_struct *);
55592 + cred = __task_cred(task);
55593 + pcred = __task_cred(task->real_parent);
55594 + ulong1 = va_arg(ap, unsigned long);
55595 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55596 + break;
55597 + case GR_CRASH2:
55598 + task = va_arg(ap, struct task_struct *);
55599 + cred = __task_cred(task);
55600 + pcred = __task_cred(task->real_parent);
55601 + ulong1 = va_arg(ap, unsigned long);
55602 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55603 + break;
55604 + case GR_RWXMAP:
55605 + file = va_arg(ap, struct file *);
55606 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55607 + break;
55608 + case GR_PSACCT:
55609 + {
55610 + unsigned int wday, cday;
55611 + __u8 whr, chr;
55612 + __u8 wmin, cmin;
55613 + __u8 wsec, csec;
55614 + char cur_tty[64] = { 0 };
55615 + char parent_tty[64] = { 0 };
55616 +
55617 + task = va_arg(ap, struct task_struct *);
55618 + wday = va_arg(ap, unsigned int);
55619 + cday = va_arg(ap, unsigned int);
55620 + whr = va_arg(ap, int);
55621 + chr = va_arg(ap, int);
55622 + wmin = va_arg(ap, int);
55623 + cmin = va_arg(ap, int);
55624 + wsec = va_arg(ap, int);
55625 + csec = va_arg(ap, int);
55626 + ulong1 = va_arg(ap, unsigned long);
55627 + cred = __task_cred(task);
55628 + pcred = __task_cred(task->real_parent);
55629 +
55630 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55631 + }
55632 + break;
55633 + default:
55634 + gr_log_middle(audit, msg, ap);
55635 + }
55636 + va_end(ap);
55637 + // these don't need DEFAULTSECARGS printed on the end
55638 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55639 + gr_log_end(audit, 0);
55640 + else
55641 + gr_log_end(audit, 1);
55642 + END_LOCKS(audit);
55643 +}
55644 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55645 new file mode 100644
55646 index 0000000..6c0416b
55647 --- /dev/null
55648 +++ b/grsecurity/grsec_mem.c
55649 @@ -0,0 +1,33 @@
55650 +#include <linux/kernel.h>
55651 +#include <linux/sched.h>
55652 +#include <linux/mm.h>
55653 +#include <linux/mman.h>
55654 +#include <linux/grinternal.h>
55655 +
55656 +void
55657 +gr_handle_ioperm(void)
55658 +{
55659 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55660 + return;
55661 +}
55662 +
55663 +void
55664 +gr_handle_iopl(void)
55665 +{
55666 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55667 + return;
55668 +}
55669 +
55670 +void
55671 +gr_handle_mem_readwrite(u64 from, u64 to)
55672 +{
55673 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55674 + return;
55675 +}
55676 +
55677 +void
55678 +gr_handle_vm86(void)
55679 +{
55680 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55681 + return;
55682 +}
55683 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55684 new file mode 100644
55685 index 0000000..2131422
55686 --- /dev/null
55687 +++ b/grsecurity/grsec_mount.c
55688 @@ -0,0 +1,62 @@
55689 +#include <linux/kernel.h>
55690 +#include <linux/sched.h>
55691 +#include <linux/mount.h>
55692 +#include <linux/grsecurity.h>
55693 +#include <linux/grinternal.h>
55694 +
55695 +void
55696 +gr_log_remount(const char *devname, const int retval)
55697 +{
55698 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55699 + if (grsec_enable_mount && (retval >= 0))
55700 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55701 +#endif
55702 + return;
55703 +}
55704 +
55705 +void
55706 +gr_log_unmount(const char *devname, const int retval)
55707 +{
55708 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55709 + if (grsec_enable_mount && (retval >= 0))
55710 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55711 +#endif
55712 + return;
55713 +}
55714 +
55715 +void
55716 +gr_log_mount(const char *from, const char *to, const int retval)
55717 +{
55718 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55719 + if (grsec_enable_mount && (retval >= 0))
55720 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55721 +#endif
55722 + return;
55723 +}
55724 +
55725 +int
55726 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55727 +{
55728 +#ifdef CONFIG_GRKERNSEC_ROFS
55729 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55730 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55731 + return -EPERM;
55732 + } else
55733 + return 0;
55734 +#endif
55735 + return 0;
55736 +}
55737 +
55738 +int
55739 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55740 +{
55741 +#ifdef CONFIG_GRKERNSEC_ROFS
55742 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55743 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55744 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55745 + return -EPERM;
55746 + } else
55747 + return 0;
55748 +#endif
55749 + return 0;
55750 +}
55751 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55752 new file mode 100644
55753 index 0000000..a3b12a0
55754 --- /dev/null
55755 +++ b/grsecurity/grsec_pax.c
55756 @@ -0,0 +1,36 @@
55757 +#include <linux/kernel.h>
55758 +#include <linux/sched.h>
55759 +#include <linux/mm.h>
55760 +#include <linux/file.h>
55761 +#include <linux/grinternal.h>
55762 +#include <linux/grsecurity.h>
55763 +
55764 +void
55765 +gr_log_textrel(struct vm_area_struct * vma)
55766 +{
55767 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55768 + if (grsec_enable_audit_textrel)
55769 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55770 +#endif
55771 + return;
55772 +}
55773 +
55774 +void
55775 +gr_log_rwxmmap(struct file *file)
55776 +{
55777 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55778 + if (grsec_enable_log_rwxmaps)
55779 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55780 +#endif
55781 + return;
55782 +}
55783 +
55784 +void
55785 +gr_log_rwxmprotect(struct file *file)
55786 +{
55787 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55788 + if (grsec_enable_log_rwxmaps)
55789 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55790 +#endif
55791 + return;
55792 +}
55793 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55794 new file mode 100644
55795 index 0000000..f7f29aa
55796 --- /dev/null
55797 +++ b/grsecurity/grsec_ptrace.c
55798 @@ -0,0 +1,30 @@
55799 +#include <linux/kernel.h>
55800 +#include <linux/sched.h>
55801 +#include <linux/grinternal.h>
55802 +#include <linux/security.h>
55803 +
55804 +void
55805 +gr_audit_ptrace(struct task_struct *task)
55806 +{
55807 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55808 + if (grsec_enable_audit_ptrace)
55809 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55810 +#endif
55811 + return;
55812 +}
55813 +
55814 +int
55815 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
55816 +{
55817 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55818 + const struct dentry *dentry = file->f_path.dentry;
55819 + const struct vfsmount *mnt = file->f_path.mnt;
55820 +
55821 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
55822 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
55823 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
55824 + return -EACCES;
55825 + }
55826 +#endif
55827 + return 0;
55828 +}
55829 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
55830 new file mode 100644
55831 index 0000000..7a5b2de
55832 --- /dev/null
55833 +++ b/grsecurity/grsec_sig.c
55834 @@ -0,0 +1,207 @@
55835 +#include <linux/kernel.h>
55836 +#include <linux/sched.h>
55837 +#include <linux/delay.h>
55838 +#include <linux/grsecurity.h>
55839 +#include <linux/grinternal.h>
55840 +#include <linux/hardirq.h>
55841 +
55842 +char *signames[] = {
55843 + [SIGSEGV] = "Segmentation fault",
55844 + [SIGILL] = "Illegal instruction",
55845 + [SIGABRT] = "Abort",
55846 + [SIGBUS] = "Invalid alignment/Bus error"
55847 +};
55848 +
55849 +void
55850 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55851 +{
55852 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55853 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55854 + (sig == SIGABRT) || (sig == SIGBUS))) {
55855 + if (t->pid == current->pid) {
55856 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55857 + } else {
55858 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55859 + }
55860 + }
55861 +#endif
55862 + return;
55863 +}
55864 +
55865 +int
55866 +gr_handle_signal(const struct task_struct *p, const int sig)
55867 +{
55868 +#ifdef CONFIG_GRKERNSEC
55869 + /* ignore the 0 signal for protected task checks */
55870 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
55871 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55872 + return -EPERM;
55873 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55874 + return -EPERM;
55875 + }
55876 +#endif
55877 + return 0;
55878 +}
55879 +
55880 +#ifdef CONFIG_GRKERNSEC
55881 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55882 +
55883 +int gr_fake_force_sig(int sig, struct task_struct *t)
55884 +{
55885 + unsigned long int flags;
55886 + int ret, blocked, ignored;
55887 + struct k_sigaction *action;
55888 +
55889 + spin_lock_irqsave(&t->sighand->siglock, flags);
55890 + action = &t->sighand->action[sig-1];
55891 + ignored = action->sa.sa_handler == SIG_IGN;
55892 + blocked = sigismember(&t->blocked, sig);
55893 + if (blocked || ignored) {
55894 + action->sa.sa_handler = SIG_DFL;
55895 + if (blocked) {
55896 + sigdelset(&t->blocked, sig);
55897 + recalc_sigpending_and_wake(t);
55898 + }
55899 + }
55900 + if (action->sa.sa_handler == SIG_DFL)
55901 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
55902 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55903 +
55904 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
55905 +
55906 + return ret;
55907 +}
55908 +#endif
55909 +
55910 +#ifdef CONFIG_GRKERNSEC_BRUTE
55911 +#define GR_USER_BAN_TIME (15 * 60)
55912 +
55913 +static int __get_dumpable(unsigned long mm_flags)
55914 +{
55915 + int ret;
55916 +
55917 + ret = mm_flags & MMF_DUMPABLE_MASK;
55918 + return (ret >= 2) ? 2 : ret;
55919 +}
55920 +#endif
55921 +
55922 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55923 +{
55924 +#ifdef CONFIG_GRKERNSEC_BRUTE
55925 + uid_t uid = 0;
55926 +
55927 + if (!grsec_enable_brute)
55928 + return;
55929 +
55930 + rcu_read_lock();
55931 + read_lock(&tasklist_lock);
55932 + read_lock(&grsec_exec_file_lock);
55933 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55934 + p->real_parent->brute = 1;
55935 + else {
55936 + const struct cred *cred = __task_cred(p), *cred2;
55937 + struct task_struct *tsk, *tsk2;
55938 +
55939 + if (!__get_dumpable(mm_flags) && cred->uid) {
55940 + struct user_struct *user;
55941 +
55942 + uid = cred->uid;
55943 +
55944 + /* this is put upon execution past expiration */
55945 + user = find_user(uid);
55946 + if (user == NULL)
55947 + goto unlock;
55948 + user->banned = 1;
55949 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55950 + if (user->ban_expires == ~0UL)
55951 + user->ban_expires--;
55952 +
55953 + do_each_thread(tsk2, tsk) {
55954 + cred2 = __task_cred(tsk);
55955 + if (tsk != p && cred2->uid == uid)
55956 + gr_fake_force_sig(SIGKILL, tsk);
55957 + } while_each_thread(tsk2, tsk);
55958 + }
55959 + }
55960 +unlock:
55961 + read_unlock(&grsec_exec_file_lock);
55962 + read_unlock(&tasklist_lock);
55963 + rcu_read_unlock();
55964 +
55965 + if (uid)
55966 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55967 +
55968 +#endif
55969 + return;
55970 +}
55971 +
55972 +void gr_handle_brute_check(void)
55973 +{
55974 +#ifdef CONFIG_GRKERNSEC_BRUTE
55975 + if (current->brute)
55976 + msleep(30 * 1000);
55977 +#endif
55978 + return;
55979 +}
55980 +
55981 +void gr_handle_kernel_exploit(void)
55982 +{
55983 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55984 + const struct cred *cred;
55985 + struct task_struct *tsk, *tsk2;
55986 + struct user_struct *user;
55987 + uid_t uid;
55988 +
55989 + if (in_irq() || in_serving_softirq() || in_nmi())
55990 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55991 +
55992 + uid = current_uid();
55993 +
55994 + if (uid == 0)
55995 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
55996 + else {
55997 + /* kill all the processes of this user, hold a reference
55998 + to their creds struct, and prevent them from creating
55999 + another process until system reset
56000 + */
56001 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56002 + /* we intentionally leak this ref */
56003 + user = get_uid(current->cred->user);
56004 + if (user) {
56005 + user->banned = 1;
56006 + user->ban_expires = ~0UL;
56007 + }
56008 +
56009 + read_lock(&tasklist_lock);
56010 + do_each_thread(tsk2, tsk) {
56011 + cred = __task_cred(tsk);
56012 + if (cred->uid == uid)
56013 + gr_fake_force_sig(SIGKILL, tsk);
56014 + } while_each_thread(tsk2, tsk);
56015 + read_unlock(&tasklist_lock);
56016 + }
56017 +#endif
56018 +}
56019 +
56020 +int __gr_process_user_ban(struct user_struct *user)
56021 +{
56022 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56023 + if (unlikely(user->banned)) {
56024 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56025 + user->banned = 0;
56026 + user->ban_expires = 0;
56027 + free_uid(user);
56028 + } else
56029 + return -EPERM;
56030 + }
56031 +#endif
56032 + return 0;
56033 +}
56034 +
56035 +int gr_process_user_ban(void)
56036 +{
56037 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56038 + return __gr_process_user_ban(current->cred->user);
56039 +#endif
56040 + return 0;
56041 +}
56042 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56043 new file mode 100644
56044 index 0000000..4030d57
56045 --- /dev/null
56046 +++ b/grsecurity/grsec_sock.c
56047 @@ -0,0 +1,244 @@
56048 +#include <linux/kernel.h>
56049 +#include <linux/module.h>
56050 +#include <linux/sched.h>
56051 +#include <linux/file.h>
56052 +#include <linux/net.h>
56053 +#include <linux/in.h>
56054 +#include <linux/ip.h>
56055 +#include <net/sock.h>
56056 +#include <net/inet_sock.h>
56057 +#include <linux/grsecurity.h>
56058 +#include <linux/grinternal.h>
56059 +#include <linux/gracl.h>
56060 +
56061 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56062 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56063 +
56064 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56065 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56066 +
56067 +#ifdef CONFIG_UNIX_MODULE
56068 +EXPORT_SYMBOL(gr_acl_handle_unix);
56069 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56070 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56071 +EXPORT_SYMBOL(gr_handle_create);
56072 +#endif
56073 +
56074 +#ifdef CONFIG_GRKERNSEC
56075 +#define gr_conn_table_size 32749
56076 +struct conn_table_entry {
56077 + struct conn_table_entry *next;
56078 + struct signal_struct *sig;
56079 +};
56080 +
56081 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56082 +DEFINE_SPINLOCK(gr_conn_table_lock);
56083 +
56084 +extern const char * gr_socktype_to_name(unsigned char type);
56085 +extern const char * gr_proto_to_name(unsigned char proto);
56086 +extern const char * gr_sockfamily_to_name(unsigned char family);
56087 +
56088 +static __inline__ int
56089 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56090 +{
56091 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56092 +}
56093 +
56094 +static __inline__ int
56095 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56096 + __u16 sport, __u16 dport)
56097 +{
56098 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56099 + sig->gr_sport == sport && sig->gr_dport == dport))
56100 + return 1;
56101 + else
56102 + return 0;
56103 +}
56104 +
56105 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56106 +{
56107 + struct conn_table_entry **match;
56108 + unsigned int index;
56109 +
56110 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56111 + sig->gr_sport, sig->gr_dport,
56112 + gr_conn_table_size);
56113 +
56114 + newent->sig = sig;
56115 +
56116 + match = &gr_conn_table[index];
56117 + newent->next = *match;
56118 + *match = newent;
56119 +
56120 + return;
56121 +}
56122 +
56123 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56124 +{
56125 + struct conn_table_entry *match, *last = NULL;
56126 + unsigned int index;
56127 +
56128 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56129 + sig->gr_sport, sig->gr_dport,
56130 + gr_conn_table_size);
56131 +
56132 + match = gr_conn_table[index];
56133 + while (match && !conn_match(match->sig,
56134 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56135 + sig->gr_dport)) {
56136 + last = match;
56137 + match = match->next;
56138 + }
56139 +
56140 + if (match) {
56141 + if (last)
56142 + last->next = match->next;
56143 + else
56144 + gr_conn_table[index] = NULL;
56145 + kfree(match);
56146 + }
56147 +
56148 + return;
56149 +}
56150 +
56151 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56152 + __u16 sport, __u16 dport)
56153 +{
56154 + struct conn_table_entry *match;
56155 + unsigned int index;
56156 +
56157 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56158 +
56159 + match = gr_conn_table[index];
56160 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56161 + match = match->next;
56162 +
56163 + if (match)
56164 + return match->sig;
56165 + else
56166 + return NULL;
56167 +}
56168 +
56169 +#endif
56170 +
56171 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56172 +{
56173 +#ifdef CONFIG_GRKERNSEC
56174 + struct signal_struct *sig = task->signal;
56175 + struct conn_table_entry *newent;
56176 +
56177 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56178 + if (newent == NULL)
56179 + return;
56180 + /* no bh lock needed since we are called with bh disabled */
56181 + spin_lock(&gr_conn_table_lock);
56182 + gr_del_task_from_ip_table_nolock(sig);
56183 + sig->gr_saddr = inet->inet_rcv_saddr;
56184 + sig->gr_daddr = inet->inet_daddr;
56185 + sig->gr_sport = inet->inet_sport;
56186 + sig->gr_dport = inet->inet_dport;
56187 + gr_add_to_task_ip_table_nolock(sig, newent);
56188 + spin_unlock(&gr_conn_table_lock);
56189 +#endif
56190 + return;
56191 +}
56192 +
56193 +void gr_del_task_from_ip_table(struct task_struct *task)
56194 +{
56195 +#ifdef CONFIG_GRKERNSEC
56196 + spin_lock_bh(&gr_conn_table_lock);
56197 + gr_del_task_from_ip_table_nolock(task->signal);
56198 + spin_unlock_bh(&gr_conn_table_lock);
56199 +#endif
56200 + return;
56201 +}
56202 +
56203 +void
56204 +gr_attach_curr_ip(const struct sock *sk)
56205 +{
56206 +#ifdef CONFIG_GRKERNSEC
56207 + struct signal_struct *p, *set;
56208 + const struct inet_sock *inet = inet_sk(sk);
56209 +
56210 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56211 + return;
56212 +
56213 + set = current->signal;
56214 +
56215 + spin_lock_bh(&gr_conn_table_lock);
56216 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56217 + inet->inet_dport, inet->inet_sport);
56218 + if (unlikely(p != NULL)) {
56219 + set->curr_ip = p->curr_ip;
56220 + set->used_accept = 1;
56221 + gr_del_task_from_ip_table_nolock(p);
56222 + spin_unlock_bh(&gr_conn_table_lock);
56223 + return;
56224 + }
56225 + spin_unlock_bh(&gr_conn_table_lock);
56226 +
56227 + set->curr_ip = inet->inet_daddr;
56228 + set->used_accept = 1;
56229 +#endif
56230 + return;
56231 +}
56232 +
56233 +int
56234 +gr_handle_sock_all(const int family, const int type, const int protocol)
56235 +{
56236 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56237 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56238 + (family != AF_UNIX)) {
56239 + if (family == AF_INET)
56240 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56241 + else
56242 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56243 + return -EACCES;
56244 + }
56245 +#endif
56246 + return 0;
56247 +}
56248 +
56249 +int
56250 +gr_handle_sock_server(const struct sockaddr *sck)
56251 +{
56252 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56253 + if (grsec_enable_socket_server &&
56254 + in_group_p(grsec_socket_server_gid) &&
56255 + sck && (sck->sa_family != AF_UNIX) &&
56256 + (sck->sa_family != AF_LOCAL)) {
56257 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56258 + return -EACCES;
56259 + }
56260 +#endif
56261 + return 0;
56262 +}
56263 +
56264 +int
56265 +gr_handle_sock_server_other(const struct sock *sck)
56266 +{
56267 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56268 + if (grsec_enable_socket_server &&
56269 + in_group_p(grsec_socket_server_gid) &&
56270 + sck && (sck->sk_family != AF_UNIX) &&
56271 + (sck->sk_family != AF_LOCAL)) {
56272 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56273 + return -EACCES;
56274 + }
56275 +#endif
56276 + return 0;
56277 +}
56278 +
56279 +int
56280 +gr_handle_sock_client(const struct sockaddr *sck)
56281 +{
56282 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56283 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56284 + sck && (sck->sa_family != AF_UNIX) &&
56285 + (sck->sa_family != AF_LOCAL)) {
56286 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56287 + return -EACCES;
56288 + }
56289 +#endif
56290 + return 0;
56291 +}
56292 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56293 new file mode 100644
56294 index 0000000..a1aedd7
56295 --- /dev/null
56296 +++ b/grsecurity/grsec_sysctl.c
56297 @@ -0,0 +1,451 @@
56298 +#include <linux/kernel.h>
56299 +#include <linux/sched.h>
56300 +#include <linux/sysctl.h>
56301 +#include <linux/grsecurity.h>
56302 +#include <linux/grinternal.h>
56303 +
56304 +int
56305 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56306 +{
56307 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56308 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56309 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56310 + return -EACCES;
56311 + }
56312 +#endif
56313 + return 0;
56314 +}
56315 +
56316 +#ifdef CONFIG_GRKERNSEC_ROFS
56317 +static int __maybe_unused one = 1;
56318 +#endif
56319 +
56320 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56321 +struct ctl_table grsecurity_table[] = {
56322 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56323 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56324 +#ifdef CONFIG_GRKERNSEC_IO
56325 + {
56326 + .procname = "disable_priv_io",
56327 + .data = &grsec_disable_privio,
56328 + .maxlen = sizeof(int),
56329 + .mode = 0600,
56330 + .proc_handler = &proc_dointvec,
56331 + },
56332 +#endif
56333 +#endif
56334 +#ifdef CONFIG_GRKERNSEC_LINK
56335 + {
56336 + .procname = "linking_restrictions",
56337 + .data = &grsec_enable_link,
56338 + .maxlen = sizeof(int),
56339 + .mode = 0600,
56340 + .proc_handler = &proc_dointvec,
56341 + },
56342 +#endif
56343 +#ifdef CONFIG_GRKERNSEC_BRUTE
56344 + {
56345 + .procname = "deter_bruteforce",
56346 + .data = &grsec_enable_brute,
56347 + .maxlen = sizeof(int),
56348 + .mode = 0600,
56349 + .proc_handler = &proc_dointvec,
56350 + },
56351 +#endif
56352 +#ifdef CONFIG_GRKERNSEC_FIFO
56353 + {
56354 + .procname = "fifo_restrictions",
56355 + .data = &grsec_enable_fifo,
56356 + .maxlen = sizeof(int),
56357 + .mode = 0600,
56358 + .proc_handler = &proc_dointvec,
56359 + },
56360 +#endif
56361 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56362 + {
56363 + .procname = "ptrace_readexec",
56364 + .data = &grsec_enable_ptrace_readexec,
56365 + .maxlen = sizeof(int),
56366 + .mode = 0600,
56367 + .proc_handler = &proc_dointvec,
56368 + },
56369 +#endif
56370 +#ifdef CONFIG_GRKERNSEC_SETXID
56371 + {
56372 + .procname = "consistent_setxid",
56373 + .data = &grsec_enable_setxid,
56374 + .maxlen = sizeof(int),
56375 + .mode = 0600,
56376 + .proc_handler = &proc_dointvec,
56377 + },
56378 +#endif
56379 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56380 + {
56381 + .procname = "ip_blackhole",
56382 + .data = &grsec_enable_blackhole,
56383 + .maxlen = sizeof(int),
56384 + .mode = 0600,
56385 + .proc_handler = &proc_dointvec,
56386 + },
56387 + {
56388 + .procname = "lastack_retries",
56389 + .data = &grsec_lastack_retries,
56390 + .maxlen = sizeof(int),
56391 + .mode = 0600,
56392 + .proc_handler = &proc_dointvec,
56393 + },
56394 +#endif
56395 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56396 + {
56397 + .procname = "exec_logging",
56398 + .data = &grsec_enable_execlog,
56399 + .maxlen = sizeof(int),
56400 + .mode = 0600,
56401 + .proc_handler = &proc_dointvec,
56402 + },
56403 +#endif
56404 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56405 + {
56406 + .procname = "rwxmap_logging",
56407 + .data = &grsec_enable_log_rwxmaps,
56408 + .maxlen = sizeof(int),
56409 + .mode = 0600,
56410 + .proc_handler = &proc_dointvec,
56411 + },
56412 +#endif
56413 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56414 + {
56415 + .procname = "signal_logging",
56416 + .data = &grsec_enable_signal,
56417 + .maxlen = sizeof(int),
56418 + .mode = 0600,
56419 + .proc_handler = &proc_dointvec,
56420 + },
56421 +#endif
56422 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56423 + {
56424 + .procname = "forkfail_logging",
56425 + .data = &grsec_enable_forkfail,
56426 + .maxlen = sizeof(int),
56427 + .mode = 0600,
56428 + .proc_handler = &proc_dointvec,
56429 + },
56430 +#endif
56431 +#ifdef CONFIG_GRKERNSEC_TIME
56432 + {
56433 + .procname = "timechange_logging",
56434 + .data = &grsec_enable_time,
56435 + .maxlen = sizeof(int),
56436 + .mode = 0600,
56437 + .proc_handler = &proc_dointvec,
56438 + },
56439 +#endif
56440 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56441 + {
56442 + .procname = "chroot_deny_shmat",
56443 + .data = &grsec_enable_chroot_shmat,
56444 + .maxlen = sizeof(int),
56445 + .mode = 0600,
56446 + .proc_handler = &proc_dointvec,
56447 + },
56448 +#endif
56449 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56450 + {
56451 + .procname = "chroot_deny_unix",
56452 + .data = &grsec_enable_chroot_unix,
56453 + .maxlen = sizeof(int),
56454 + .mode = 0600,
56455 + .proc_handler = &proc_dointvec,
56456 + },
56457 +#endif
56458 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56459 + {
56460 + .procname = "chroot_deny_mount",
56461 + .data = &grsec_enable_chroot_mount,
56462 + .maxlen = sizeof(int),
56463 + .mode = 0600,
56464 + .proc_handler = &proc_dointvec,
56465 + },
56466 +#endif
56467 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56468 + {
56469 + .procname = "chroot_deny_fchdir",
56470 + .data = &grsec_enable_chroot_fchdir,
56471 + .maxlen = sizeof(int),
56472 + .mode = 0600,
56473 + .proc_handler = &proc_dointvec,
56474 + },
56475 +#endif
56476 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56477 + {
56478 + .procname = "chroot_deny_chroot",
56479 + .data = &grsec_enable_chroot_double,
56480 + .maxlen = sizeof(int),
56481 + .mode = 0600,
56482 + .proc_handler = &proc_dointvec,
56483 + },
56484 +#endif
56485 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56486 + {
56487 + .procname = "chroot_deny_pivot",
56488 + .data = &grsec_enable_chroot_pivot,
56489 + .maxlen = sizeof(int),
56490 + .mode = 0600,
56491 + .proc_handler = &proc_dointvec,
56492 + },
56493 +#endif
56494 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56495 + {
56496 + .procname = "chroot_enforce_chdir",
56497 + .data = &grsec_enable_chroot_chdir,
56498 + .maxlen = sizeof(int),
56499 + .mode = 0600,
56500 + .proc_handler = &proc_dointvec,
56501 + },
56502 +#endif
56503 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56504 + {
56505 + .procname = "chroot_deny_chmod",
56506 + .data = &grsec_enable_chroot_chmod,
56507 + .maxlen = sizeof(int),
56508 + .mode = 0600,
56509 + .proc_handler = &proc_dointvec,
56510 + },
56511 +#endif
56512 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56513 + {
56514 + .procname = "chroot_deny_mknod",
56515 + .data = &grsec_enable_chroot_mknod,
56516 + .maxlen = sizeof(int),
56517 + .mode = 0600,
56518 + .proc_handler = &proc_dointvec,
56519 + },
56520 +#endif
56521 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56522 + {
56523 + .procname = "chroot_restrict_nice",
56524 + .data = &grsec_enable_chroot_nice,
56525 + .maxlen = sizeof(int),
56526 + .mode = 0600,
56527 + .proc_handler = &proc_dointvec,
56528 + },
56529 +#endif
56530 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56531 + {
56532 + .procname = "chroot_execlog",
56533 + .data = &grsec_enable_chroot_execlog,
56534 + .maxlen = sizeof(int),
56535 + .mode = 0600,
56536 + .proc_handler = &proc_dointvec,
56537 + },
56538 +#endif
56539 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56540 + {
56541 + .procname = "chroot_caps",
56542 + .data = &grsec_enable_chroot_caps,
56543 + .maxlen = sizeof(int),
56544 + .mode = 0600,
56545 + .proc_handler = &proc_dointvec,
56546 + },
56547 +#endif
56548 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56549 + {
56550 + .procname = "chroot_deny_sysctl",
56551 + .data = &grsec_enable_chroot_sysctl,
56552 + .maxlen = sizeof(int),
56553 + .mode = 0600,
56554 + .proc_handler = &proc_dointvec,
56555 + },
56556 +#endif
56557 +#ifdef CONFIG_GRKERNSEC_TPE
56558 + {
56559 + .procname = "tpe",
56560 + .data = &grsec_enable_tpe,
56561 + .maxlen = sizeof(int),
56562 + .mode = 0600,
56563 + .proc_handler = &proc_dointvec,
56564 + },
56565 + {
56566 + .procname = "tpe_gid",
56567 + .data = &grsec_tpe_gid,
56568 + .maxlen = sizeof(int),
56569 + .mode = 0600,
56570 + .proc_handler = &proc_dointvec,
56571 + },
56572 +#endif
56573 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56574 + {
56575 + .procname = "tpe_invert",
56576 + .data = &grsec_enable_tpe_invert,
56577 + .maxlen = sizeof(int),
56578 + .mode = 0600,
56579 + .proc_handler = &proc_dointvec,
56580 + },
56581 +#endif
56582 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56583 + {
56584 + .procname = "tpe_restrict_all",
56585 + .data = &grsec_enable_tpe_all,
56586 + .maxlen = sizeof(int),
56587 + .mode = 0600,
56588 + .proc_handler = &proc_dointvec,
56589 + },
56590 +#endif
56591 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56592 + {
56593 + .procname = "socket_all",
56594 + .data = &grsec_enable_socket_all,
56595 + .maxlen = sizeof(int),
56596 + .mode = 0600,
56597 + .proc_handler = &proc_dointvec,
56598 + },
56599 + {
56600 + .procname = "socket_all_gid",
56601 + .data = &grsec_socket_all_gid,
56602 + .maxlen = sizeof(int),
56603 + .mode = 0600,
56604 + .proc_handler = &proc_dointvec,
56605 + },
56606 +#endif
56607 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56608 + {
56609 + .procname = "socket_client",
56610 + .data = &grsec_enable_socket_client,
56611 + .maxlen = sizeof(int),
56612 + .mode = 0600,
56613 + .proc_handler = &proc_dointvec,
56614 + },
56615 + {
56616 + .procname = "socket_client_gid",
56617 + .data = &grsec_socket_client_gid,
56618 + .maxlen = sizeof(int),
56619 + .mode = 0600,
56620 + .proc_handler = &proc_dointvec,
56621 + },
56622 +#endif
56623 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56624 + {
56625 + .procname = "socket_server",
56626 + .data = &grsec_enable_socket_server,
56627 + .maxlen = sizeof(int),
56628 + .mode = 0600,
56629 + .proc_handler = &proc_dointvec,
56630 + },
56631 + {
56632 + .procname = "socket_server_gid",
56633 + .data = &grsec_socket_server_gid,
56634 + .maxlen = sizeof(int),
56635 + .mode = 0600,
56636 + .proc_handler = &proc_dointvec,
56637 + },
56638 +#endif
56639 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56640 + {
56641 + .procname = "audit_group",
56642 + .data = &grsec_enable_group,
56643 + .maxlen = sizeof(int),
56644 + .mode = 0600,
56645 + .proc_handler = &proc_dointvec,
56646 + },
56647 + {
56648 + .procname = "audit_gid",
56649 + .data = &grsec_audit_gid,
56650 + .maxlen = sizeof(int),
56651 + .mode = 0600,
56652 + .proc_handler = &proc_dointvec,
56653 + },
56654 +#endif
56655 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56656 + {
56657 + .procname = "audit_chdir",
56658 + .data = &grsec_enable_chdir,
56659 + .maxlen = sizeof(int),
56660 + .mode = 0600,
56661 + .proc_handler = &proc_dointvec,
56662 + },
56663 +#endif
56664 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56665 + {
56666 + .procname = "audit_mount",
56667 + .data = &grsec_enable_mount,
56668 + .maxlen = sizeof(int),
56669 + .mode = 0600,
56670 + .proc_handler = &proc_dointvec,
56671 + },
56672 +#endif
56673 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56674 + {
56675 + .procname = "audit_textrel",
56676 + .data = &grsec_enable_audit_textrel,
56677 + .maxlen = sizeof(int),
56678 + .mode = 0600,
56679 + .proc_handler = &proc_dointvec,
56680 + },
56681 +#endif
56682 +#ifdef CONFIG_GRKERNSEC_DMESG
56683 + {
56684 + .procname = "dmesg",
56685 + .data = &grsec_enable_dmesg,
56686 + .maxlen = sizeof(int),
56687 + .mode = 0600,
56688 + .proc_handler = &proc_dointvec,
56689 + },
56690 +#endif
56691 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56692 + {
56693 + .procname = "chroot_findtask",
56694 + .data = &grsec_enable_chroot_findtask,
56695 + .maxlen = sizeof(int),
56696 + .mode = 0600,
56697 + .proc_handler = &proc_dointvec,
56698 + },
56699 +#endif
56700 +#ifdef CONFIG_GRKERNSEC_RESLOG
56701 + {
56702 + .procname = "resource_logging",
56703 + .data = &grsec_resource_logging,
56704 + .maxlen = sizeof(int),
56705 + .mode = 0600,
56706 + .proc_handler = &proc_dointvec,
56707 + },
56708 +#endif
56709 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56710 + {
56711 + .procname = "audit_ptrace",
56712 + .data = &grsec_enable_audit_ptrace,
56713 + .maxlen = sizeof(int),
56714 + .mode = 0600,
56715 + .proc_handler = &proc_dointvec,
56716 + },
56717 +#endif
56718 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56719 + {
56720 + .procname = "harden_ptrace",
56721 + .data = &grsec_enable_harden_ptrace,
56722 + .maxlen = sizeof(int),
56723 + .mode = 0600,
56724 + .proc_handler = &proc_dointvec,
56725 + },
56726 +#endif
56727 + {
56728 + .procname = "grsec_lock",
56729 + .data = &grsec_lock,
56730 + .maxlen = sizeof(int),
56731 + .mode = 0600,
56732 + .proc_handler = &proc_dointvec,
56733 + },
56734 +#endif
56735 +#ifdef CONFIG_GRKERNSEC_ROFS
56736 + {
56737 + .procname = "romount_protect",
56738 + .data = &grsec_enable_rofs,
56739 + .maxlen = sizeof(int),
56740 + .mode = 0600,
56741 + .proc_handler = &proc_dointvec_minmax,
56742 + .extra1 = &one,
56743 + .extra2 = &one,
56744 + },
56745 +#endif
56746 + { }
56747 +};
56748 +#endif
56749 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56750 new file mode 100644
56751 index 0000000..0dc13c3
56752 --- /dev/null
56753 +++ b/grsecurity/grsec_time.c
56754 @@ -0,0 +1,16 @@
56755 +#include <linux/kernel.h>
56756 +#include <linux/sched.h>
56757 +#include <linux/grinternal.h>
56758 +#include <linux/module.h>
56759 +
56760 +void
56761 +gr_log_timechange(void)
56762 +{
56763 +#ifdef CONFIG_GRKERNSEC_TIME
56764 + if (grsec_enable_time)
56765 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56766 +#endif
56767 + return;
56768 +}
56769 +
56770 +EXPORT_SYMBOL(gr_log_timechange);
56771 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56772 new file mode 100644
56773 index 0000000..a35ba33
56774 --- /dev/null
56775 +++ b/grsecurity/grsec_tpe.c
56776 @@ -0,0 +1,73 @@
56777 +#include <linux/kernel.h>
56778 +#include <linux/sched.h>
56779 +#include <linux/file.h>
56780 +#include <linux/fs.h>
56781 +#include <linux/grinternal.h>
56782 +
56783 +extern int gr_acl_tpe_check(void);
56784 +
56785 +int
56786 +gr_tpe_allow(const struct file *file)
56787 +{
56788 +#ifdef CONFIG_GRKERNSEC
56789 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56790 + const struct cred *cred = current_cred();
56791 + char *msg = NULL;
56792 + char *msg2 = NULL;
56793 +
56794 + // never restrict root
56795 + if (!cred->uid)
56796 + return 1;
56797 +
56798 + if (grsec_enable_tpe) {
56799 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56800 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
56801 + msg = "not being in trusted group";
56802 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
56803 + msg = "being in untrusted group";
56804 +#else
56805 + if (in_group_p(grsec_tpe_gid))
56806 + msg = "being in untrusted group";
56807 +#endif
56808 + }
56809 + if (!msg && gr_acl_tpe_check())
56810 + msg = "being in untrusted role";
56811 +
56812 + // not in any affected group/role
56813 + if (!msg)
56814 + goto next_check;
56815 +
56816 + if (inode->i_uid)
56817 + msg2 = "file in non-root-owned directory";
56818 + else if (inode->i_mode & S_IWOTH)
56819 + msg2 = "file in world-writable directory";
56820 + else if (inode->i_mode & S_IWGRP)
56821 + msg2 = "file in group-writable directory";
56822 +
56823 + if (msg && msg2) {
56824 + char fullmsg[64] = {0};
56825 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
56826 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
56827 + return 0;
56828 + }
56829 + msg = NULL;
56830 +next_check:
56831 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56832 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
56833 + return 1;
56834 +
56835 + if (inode->i_uid && (inode->i_uid != cred->uid))
56836 + msg = "directory not owned by user";
56837 + else if (inode->i_mode & S_IWOTH)
56838 + msg = "file in world-writable directory";
56839 + else if (inode->i_mode & S_IWGRP)
56840 + msg = "file in group-writable directory";
56841 +
56842 + if (msg) {
56843 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
56844 + return 0;
56845 + }
56846 +#endif
56847 +#endif
56848 + return 1;
56849 +}
56850 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
56851 new file mode 100644
56852 index 0000000..9f7b1ac
56853 --- /dev/null
56854 +++ b/grsecurity/grsum.c
56855 @@ -0,0 +1,61 @@
56856 +#include <linux/err.h>
56857 +#include <linux/kernel.h>
56858 +#include <linux/sched.h>
56859 +#include <linux/mm.h>
56860 +#include <linux/scatterlist.h>
56861 +#include <linux/crypto.h>
56862 +#include <linux/gracl.h>
56863 +
56864 +
56865 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56866 +#error "crypto and sha256 must be built into the kernel"
56867 +#endif
56868 +
56869 +int
56870 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56871 +{
56872 + char *p;
56873 + struct crypto_hash *tfm;
56874 + struct hash_desc desc;
56875 + struct scatterlist sg;
56876 + unsigned char temp_sum[GR_SHA_LEN];
56877 + volatile int retval = 0;
56878 + volatile int dummy = 0;
56879 + unsigned int i;
56880 +
56881 + sg_init_table(&sg, 1);
56882 +
56883 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56884 + if (IS_ERR(tfm)) {
56885 + /* should never happen, since sha256 should be built in */
56886 + return 1;
56887 + }
56888 +
56889 + desc.tfm = tfm;
56890 + desc.flags = 0;
56891 +
56892 + crypto_hash_init(&desc);
56893 +
56894 + p = salt;
56895 + sg_set_buf(&sg, p, GR_SALT_LEN);
56896 + crypto_hash_update(&desc, &sg, sg.length);
56897 +
56898 + p = entry->pw;
56899 + sg_set_buf(&sg, p, strlen(p));
56900 +
56901 + crypto_hash_update(&desc, &sg, sg.length);
56902 +
56903 + crypto_hash_final(&desc, temp_sum);
56904 +
56905 + memset(entry->pw, 0, GR_PW_LEN);
56906 +
56907 + for (i = 0; i < GR_SHA_LEN; i++)
56908 + if (sum[i] != temp_sum[i])
56909 + retval = 1;
56910 + else
56911 + dummy = 1; // waste a cycle
56912 +
56913 + crypto_free_hash(tfm);
56914 +
56915 + return retval;
56916 +}
56917 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
56918 index 6cd5b64..f620d2d 100644
56919 --- a/include/acpi/acpi_bus.h
56920 +++ b/include/acpi/acpi_bus.h
56921 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56922 acpi_op_bind bind;
56923 acpi_op_unbind unbind;
56924 acpi_op_notify notify;
56925 -};
56926 +} __no_const;
56927
56928 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56929
56930 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
56931 index b7babf0..71e4e74 100644
56932 --- a/include/asm-generic/atomic-long.h
56933 +++ b/include/asm-generic/atomic-long.h
56934 @@ -22,6 +22,12 @@
56935
56936 typedef atomic64_t atomic_long_t;
56937
56938 +#ifdef CONFIG_PAX_REFCOUNT
56939 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56940 +#else
56941 +typedef atomic64_t atomic_long_unchecked_t;
56942 +#endif
56943 +
56944 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56945
56946 static inline long atomic_long_read(atomic_long_t *l)
56947 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
56948 return (long)atomic64_read(v);
56949 }
56950
56951 +#ifdef CONFIG_PAX_REFCOUNT
56952 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56953 +{
56954 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56955 +
56956 + return (long)atomic64_read_unchecked(v);
56957 +}
56958 +#endif
56959 +
56960 static inline void atomic_long_set(atomic_long_t *l, long i)
56961 {
56962 atomic64_t *v = (atomic64_t *)l;
56963 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
56964 atomic64_set(v, i);
56965 }
56966
56967 +#ifdef CONFIG_PAX_REFCOUNT
56968 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56969 +{
56970 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56971 +
56972 + atomic64_set_unchecked(v, i);
56973 +}
56974 +#endif
56975 +
56976 static inline void atomic_long_inc(atomic_long_t *l)
56977 {
56978 atomic64_t *v = (atomic64_t *)l;
56979 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
56980 atomic64_inc(v);
56981 }
56982
56983 +#ifdef CONFIG_PAX_REFCOUNT
56984 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56985 +{
56986 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56987 +
56988 + atomic64_inc_unchecked(v);
56989 +}
56990 +#endif
56991 +
56992 static inline void atomic_long_dec(atomic_long_t *l)
56993 {
56994 atomic64_t *v = (atomic64_t *)l;
56995 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
56996 atomic64_dec(v);
56997 }
56998
56999 +#ifdef CONFIG_PAX_REFCOUNT
57000 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57001 +{
57002 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57003 +
57004 + atomic64_dec_unchecked(v);
57005 +}
57006 +#endif
57007 +
57008 static inline void atomic_long_add(long i, atomic_long_t *l)
57009 {
57010 atomic64_t *v = (atomic64_t *)l;
57011 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57012 atomic64_add(i, v);
57013 }
57014
57015 +#ifdef CONFIG_PAX_REFCOUNT
57016 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57017 +{
57018 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57019 +
57020 + atomic64_add_unchecked(i, v);
57021 +}
57022 +#endif
57023 +
57024 static inline void atomic_long_sub(long i, atomic_long_t *l)
57025 {
57026 atomic64_t *v = (atomic64_t *)l;
57027 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57028 atomic64_sub(i, v);
57029 }
57030
57031 +#ifdef CONFIG_PAX_REFCOUNT
57032 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57033 +{
57034 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57035 +
57036 + atomic64_sub_unchecked(i, v);
57037 +}
57038 +#endif
57039 +
57040 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57041 {
57042 atomic64_t *v = (atomic64_t *)l;
57043 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57044 return (long)atomic64_inc_return(v);
57045 }
57046
57047 +#ifdef CONFIG_PAX_REFCOUNT
57048 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57049 +{
57050 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57051 +
57052 + return (long)atomic64_inc_return_unchecked(v);
57053 +}
57054 +#endif
57055 +
57056 static inline long atomic_long_dec_return(atomic_long_t *l)
57057 {
57058 atomic64_t *v = (atomic64_t *)l;
57059 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57060
57061 typedef atomic_t atomic_long_t;
57062
57063 +#ifdef CONFIG_PAX_REFCOUNT
57064 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57065 +#else
57066 +typedef atomic_t atomic_long_unchecked_t;
57067 +#endif
57068 +
57069 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57070 static inline long atomic_long_read(atomic_long_t *l)
57071 {
57072 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57073 return (long)atomic_read(v);
57074 }
57075
57076 +#ifdef CONFIG_PAX_REFCOUNT
57077 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57078 +{
57079 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57080 +
57081 + return (long)atomic_read_unchecked(v);
57082 +}
57083 +#endif
57084 +
57085 static inline void atomic_long_set(atomic_long_t *l, long i)
57086 {
57087 atomic_t *v = (atomic_t *)l;
57088 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57089 atomic_set(v, i);
57090 }
57091
57092 +#ifdef CONFIG_PAX_REFCOUNT
57093 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57094 +{
57095 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57096 +
57097 + atomic_set_unchecked(v, i);
57098 +}
57099 +#endif
57100 +
57101 static inline void atomic_long_inc(atomic_long_t *l)
57102 {
57103 atomic_t *v = (atomic_t *)l;
57104 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57105 atomic_inc(v);
57106 }
57107
57108 +#ifdef CONFIG_PAX_REFCOUNT
57109 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57110 +{
57111 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57112 +
57113 + atomic_inc_unchecked(v);
57114 +}
57115 +#endif
57116 +
57117 static inline void atomic_long_dec(atomic_long_t *l)
57118 {
57119 atomic_t *v = (atomic_t *)l;
57120 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57121 atomic_dec(v);
57122 }
57123
57124 +#ifdef CONFIG_PAX_REFCOUNT
57125 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57126 +{
57127 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57128 +
57129 + atomic_dec_unchecked(v);
57130 +}
57131 +#endif
57132 +
57133 static inline void atomic_long_add(long i, atomic_long_t *l)
57134 {
57135 atomic_t *v = (atomic_t *)l;
57136 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57137 atomic_add(i, v);
57138 }
57139
57140 +#ifdef CONFIG_PAX_REFCOUNT
57141 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57142 +{
57143 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57144 +
57145 + atomic_add_unchecked(i, v);
57146 +}
57147 +#endif
57148 +
57149 static inline void atomic_long_sub(long i, atomic_long_t *l)
57150 {
57151 atomic_t *v = (atomic_t *)l;
57152 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57153 atomic_sub(i, v);
57154 }
57155
57156 +#ifdef CONFIG_PAX_REFCOUNT
57157 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57158 +{
57159 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57160 +
57161 + atomic_sub_unchecked(i, v);
57162 +}
57163 +#endif
57164 +
57165 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57166 {
57167 atomic_t *v = (atomic_t *)l;
57168 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57169 return (long)atomic_inc_return(v);
57170 }
57171
57172 +#ifdef CONFIG_PAX_REFCOUNT
57173 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57174 +{
57175 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57176 +
57177 + return (long)atomic_inc_return_unchecked(v);
57178 +}
57179 +#endif
57180 +
57181 static inline long atomic_long_dec_return(atomic_long_t *l)
57182 {
57183 atomic_t *v = (atomic_t *)l;
57184 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57185
57186 #endif /* BITS_PER_LONG == 64 */
57187
57188 +#ifdef CONFIG_PAX_REFCOUNT
57189 +static inline void pax_refcount_needs_these_functions(void)
57190 +{
57191 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57192 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57193 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57194 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57195 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57196 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57197 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57198 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57199 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57200 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57201 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57202 +
57203 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57204 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57205 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57206 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57207 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57208 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57209 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57210 +}
57211 +#else
57212 +#define atomic_read_unchecked(v) atomic_read(v)
57213 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57214 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57215 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57216 +#define atomic_inc_unchecked(v) atomic_inc(v)
57217 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57218 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57219 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57220 +#define atomic_dec_unchecked(v) atomic_dec(v)
57221 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57222 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57223 +
57224 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57225 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57226 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57227 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57228 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57229 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57230 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57231 +#endif
57232 +
57233 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57234 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57235 index b18ce4f..2ee2843 100644
57236 --- a/include/asm-generic/atomic64.h
57237 +++ b/include/asm-generic/atomic64.h
57238 @@ -16,6 +16,8 @@ typedef struct {
57239 long long counter;
57240 } atomic64_t;
57241
57242 +typedef atomic64_t atomic64_unchecked_t;
57243 +
57244 #define ATOMIC64_INIT(i) { (i) }
57245
57246 extern long long atomic64_read(const atomic64_t *v);
57247 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57248 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57249 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57250
57251 +#define atomic64_read_unchecked(v) atomic64_read(v)
57252 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57253 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57254 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57255 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57256 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57257 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57258 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57259 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57260 +
57261 #endif /* _ASM_GENERIC_ATOMIC64_H */
57262 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57263 index 1bfcfe5..e04c5c9 100644
57264 --- a/include/asm-generic/cache.h
57265 +++ b/include/asm-generic/cache.h
57266 @@ -6,7 +6,7 @@
57267 * cache lines need to provide their own cache.h.
57268 */
57269
57270 -#define L1_CACHE_SHIFT 5
57271 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57272 +#define L1_CACHE_SHIFT 5UL
57273 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57274
57275 #endif /* __ASM_GENERIC_CACHE_H */
57276 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57277 index 1ca3efc..e3dc852 100644
57278 --- a/include/asm-generic/int-l64.h
57279 +++ b/include/asm-generic/int-l64.h
57280 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57281 typedef signed long s64;
57282 typedef unsigned long u64;
57283
57284 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57285 +
57286 #define S8_C(x) x
57287 #define U8_C(x) x ## U
57288 #define S16_C(x) x
57289 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57290 index f394147..b6152b9 100644
57291 --- a/include/asm-generic/int-ll64.h
57292 +++ b/include/asm-generic/int-ll64.h
57293 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57294 typedef signed long long s64;
57295 typedef unsigned long long u64;
57296
57297 +typedef unsigned long long intoverflow_t;
57298 +
57299 #define S8_C(x) x
57300 #define U8_C(x) x ## U
57301 #define S16_C(x) x
57302 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57303 index 0232ccb..13d9165 100644
57304 --- a/include/asm-generic/kmap_types.h
57305 +++ b/include/asm-generic/kmap_types.h
57306 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57307 KMAP_D(17) KM_NMI,
57308 KMAP_D(18) KM_NMI_PTE,
57309 KMAP_D(19) KM_KDB,
57310 +KMAP_D(20) KM_CLEARPAGE,
57311 /*
57312 * Remember to update debug_kmap_atomic() when adding new kmap types!
57313 */
57314 -KMAP_D(20) KM_TYPE_NR
57315 +KMAP_D(21) KM_TYPE_NR
57316 };
57317
57318 #undef KMAP_D
57319 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57320 index 725612b..9cc513a 100644
57321 --- a/include/asm-generic/pgtable-nopmd.h
57322 +++ b/include/asm-generic/pgtable-nopmd.h
57323 @@ -1,14 +1,19 @@
57324 #ifndef _PGTABLE_NOPMD_H
57325 #define _PGTABLE_NOPMD_H
57326
57327 -#ifndef __ASSEMBLY__
57328 -
57329 #include <asm-generic/pgtable-nopud.h>
57330
57331 -struct mm_struct;
57332 -
57333 #define __PAGETABLE_PMD_FOLDED
57334
57335 +#define PMD_SHIFT PUD_SHIFT
57336 +#define PTRS_PER_PMD 1
57337 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57338 +#define PMD_MASK (~(PMD_SIZE-1))
57339 +
57340 +#ifndef __ASSEMBLY__
57341 +
57342 +struct mm_struct;
57343 +
57344 /*
57345 * Having the pmd type consist of a pud gets the size right, and allows
57346 * us to conceptually access the pud entry that this pmd is folded into
57347 @@ -16,11 +21,6 @@ struct mm_struct;
57348 */
57349 typedef struct { pud_t pud; } pmd_t;
57350
57351 -#define PMD_SHIFT PUD_SHIFT
57352 -#define PTRS_PER_PMD 1
57353 -#define PMD_SIZE (1UL << PMD_SHIFT)
57354 -#define PMD_MASK (~(PMD_SIZE-1))
57355 -
57356 /*
57357 * The "pud_xxx()" functions here are trivial for a folded two-level
57358 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57359 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57360 index 810431d..ccc3638 100644
57361 --- a/include/asm-generic/pgtable-nopud.h
57362 +++ b/include/asm-generic/pgtable-nopud.h
57363 @@ -1,10 +1,15 @@
57364 #ifndef _PGTABLE_NOPUD_H
57365 #define _PGTABLE_NOPUD_H
57366
57367 -#ifndef __ASSEMBLY__
57368 -
57369 #define __PAGETABLE_PUD_FOLDED
57370
57371 +#define PUD_SHIFT PGDIR_SHIFT
57372 +#define PTRS_PER_PUD 1
57373 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57374 +#define PUD_MASK (~(PUD_SIZE-1))
57375 +
57376 +#ifndef __ASSEMBLY__
57377 +
57378 /*
57379 * Having the pud type consist of a pgd gets the size right, and allows
57380 * us to conceptually access the pgd entry that this pud is folded into
57381 @@ -12,11 +17,6 @@
57382 */
57383 typedef struct { pgd_t pgd; } pud_t;
57384
57385 -#define PUD_SHIFT PGDIR_SHIFT
57386 -#define PTRS_PER_PUD 1
57387 -#define PUD_SIZE (1UL << PUD_SHIFT)
57388 -#define PUD_MASK (~(PUD_SIZE-1))
57389 -
57390 /*
57391 * The "pgd_xxx()" functions here are trivial for a folded two-level
57392 * setup: the pud is never bad, and a pud always exists (as it's folded
57393 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57394 index 76bff2b..c7a14e2 100644
57395 --- a/include/asm-generic/pgtable.h
57396 +++ b/include/asm-generic/pgtable.h
57397 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57398 #endif /* __HAVE_ARCH_PMD_WRITE */
57399 #endif
57400
57401 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57402 +static inline unsigned long pax_open_kernel(void) { return 0; }
57403 +#endif
57404 +
57405 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57406 +static inline unsigned long pax_close_kernel(void) { return 0; }
57407 +#endif
57408 +
57409 #endif /* !__ASSEMBLY__ */
57410
57411 #endif /* _ASM_GENERIC_PGTABLE_H */
57412 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57413 index b5e2e4c..6a5373e 100644
57414 --- a/include/asm-generic/vmlinux.lds.h
57415 +++ b/include/asm-generic/vmlinux.lds.h
57416 @@ -217,6 +217,7 @@
57417 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57418 VMLINUX_SYMBOL(__start_rodata) = .; \
57419 *(.rodata) *(.rodata.*) \
57420 + *(.data..read_only) \
57421 *(__vermagic) /* Kernel version magic */ \
57422 . = ALIGN(8); \
57423 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57424 @@ -722,17 +723,18 @@
57425 * section in the linker script will go there too. @phdr should have
57426 * a leading colon.
57427 *
57428 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57429 + * Note that this macros defines per_cpu_load as an absolute symbol.
57430 * If there is no need to put the percpu section at a predetermined
57431 * address, use PERCPU_SECTION.
57432 */
57433 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57434 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57435 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57436 + per_cpu_load = .; \
57437 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57438 - LOAD_OFFSET) { \
57439 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57440 PERCPU_INPUT(cacheline) \
57441 } phdr \
57442 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57443 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57444
57445 /**
57446 * PERCPU_SECTION - define output section for percpu area, simple version
57447 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57448 index bf4b2dc..2d0762f 100644
57449 --- a/include/drm/drmP.h
57450 +++ b/include/drm/drmP.h
57451 @@ -72,6 +72,7 @@
57452 #include <linux/workqueue.h>
57453 #include <linux/poll.h>
57454 #include <asm/pgalloc.h>
57455 +#include <asm/local.h>
57456 #include "drm.h"
57457
57458 #include <linux/idr.h>
57459 @@ -1038,7 +1039,7 @@ struct drm_device {
57460
57461 /** \name Usage Counters */
57462 /*@{ */
57463 - int open_count; /**< Outstanding files open */
57464 + local_t open_count; /**< Outstanding files open */
57465 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57466 atomic_t vma_count; /**< Outstanding vma areas open */
57467 int buf_use; /**< Buffers in use -- cannot alloc */
57468 @@ -1049,7 +1050,7 @@ struct drm_device {
57469 /*@{ */
57470 unsigned long counters;
57471 enum drm_stat_type types[15];
57472 - atomic_t counts[15];
57473 + atomic_unchecked_t counts[15];
57474 /*@} */
57475
57476 struct list_head filelist;
57477 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57478 index 73b0712..0b7ef2f 100644
57479 --- a/include/drm/drm_crtc_helper.h
57480 +++ b/include/drm/drm_crtc_helper.h
57481 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57482
57483 /* disable crtc when not in use - more explicit than dpms off */
57484 void (*disable)(struct drm_crtc *crtc);
57485 -};
57486 +} __no_const;
57487
57488 struct drm_encoder_helper_funcs {
57489 void (*dpms)(struct drm_encoder *encoder, int mode);
57490 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57491 struct drm_connector *connector);
57492 /* disable encoder when not in use - more explicit than dpms off */
57493 void (*disable)(struct drm_encoder *encoder);
57494 -};
57495 +} __no_const;
57496
57497 struct drm_connector_helper_funcs {
57498 int (*get_modes)(struct drm_connector *connector);
57499 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57500 index 26c1f78..6722682 100644
57501 --- a/include/drm/ttm/ttm_memory.h
57502 +++ b/include/drm/ttm/ttm_memory.h
57503 @@ -47,7 +47,7 @@
57504
57505 struct ttm_mem_shrink {
57506 int (*do_shrink) (struct ttm_mem_shrink *);
57507 -};
57508 +} __no_const;
57509
57510 /**
57511 * struct ttm_mem_global - Global memory accounting structure.
57512 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57513 index e86dfca..40cc55f 100644
57514 --- a/include/linux/a.out.h
57515 +++ b/include/linux/a.out.h
57516 @@ -39,6 +39,14 @@ enum machine_type {
57517 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57518 };
57519
57520 +/* Constants for the N_FLAGS field */
57521 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57522 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57523 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57524 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57525 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57526 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57527 +
57528 #if !defined (N_MAGIC)
57529 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57530 #endif
57531 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57532 index 49a83ca..df96b54 100644
57533 --- a/include/linux/atmdev.h
57534 +++ b/include/linux/atmdev.h
57535 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57536 #endif
57537
57538 struct k_atm_aal_stats {
57539 -#define __HANDLE_ITEM(i) atomic_t i
57540 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57541 __AAL_STAT_ITEMS
57542 #undef __HANDLE_ITEM
57543 };
57544 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57545 index fd88a39..f4d0bad 100644
57546 --- a/include/linux/binfmts.h
57547 +++ b/include/linux/binfmts.h
57548 @@ -88,6 +88,7 @@ struct linux_binfmt {
57549 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57550 int (*load_shlib)(struct file *);
57551 int (*core_dump)(struct coredump_params *cprm);
57552 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57553 unsigned long min_coredump; /* minimal dump size */
57554 };
57555
57556 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57557 index 0ed1eb0..3ab569b 100644
57558 --- a/include/linux/blkdev.h
57559 +++ b/include/linux/blkdev.h
57560 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57561 /* this callback is with swap_lock and sometimes page table lock held */
57562 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57563 struct module *owner;
57564 -};
57565 +} __do_const;
57566
57567 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57568 unsigned long);
57569 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57570 index 4d1a074..88f929a 100644
57571 --- a/include/linux/blktrace_api.h
57572 +++ b/include/linux/blktrace_api.h
57573 @@ -162,7 +162,7 @@ struct blk_trace {
57574 struct dentry *dir;
57575 struct dentry *dropped_file;
57576 struct dentry *msg_file;
57577 - atomic_t dropped;
57578 + atomic_unchecked_t dropped;
57579 };
57580
57581 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57582 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57583 index 83195fb..0b0f77d 100644
57584 --- a/include/linux/byteorder/little_endian.h
57585 +++ b/include/linux/byteorder/little_endian.h
57586 @@ -42,51 +42,51 @@
57587
57588 static inline __le64 __cpu_to_le64p(const __u64 *p)
57589 {
57590 - return (__force __le64)*p;
57591 + return (__force const __le64)*p;
57592 }
57593 static inline __u64 __le64_to_cpup(const __le64 *p)
57594 {
57595 - return (__force __u64)*p;
57596 + return (__force const __u64)*p;
57597 }
57598 static inline __le32 __cpu_to_le32p(const __u32 *p)
57599 {
57600 - return (__force __le32)*p;
57601 + return (__force const __le32)*p;
57602 }
57603 static inline __u32 __le32_to_cpup(const __le32 *p)
57604 {
57605 - return (__force __u32)*p;
57606 + return (__force const __u32)*p;
57607 }
57608 static inline __le16 __cpu_to_le16p(const __u16 *p)
57609 {
57610 - return (__force __le16)*p;
57611 + return (__force const __le16)*p;
57612 }
57613 static inline __u16 __le16_to_cpup(const __le16 *p)
57614 {
57615 - return (__force __u16)*p;
57616 + return (__force const __u16)*p;
57617 }
57618 static inline __be64 __cpu_to_be64p(const __u64 *p)
57619 {
57620 - return (__force __be64)__swab64p(p);
57621 + return (__force const __be64)__swab64p(p);
57622 }
57623 static inline __u64 __be64_to_cpup(const __be64 *p)
57624 {
57625 - return __swab64p((__u64 *)p);
57626 + return __swab64p((const __u64 *)p);
57627 }
57628 static inline __be32 __cpu_to_be32p(const __u32 *p)
57629 {
57630 - return (__force __be32)__swab32p(p);
57631 + return (__force const __be32)__swab32p(p);
57632 }
57633 static inline __u32 __be32_to_cpup(const __be32 *p)
57634 {
57635 - return __swab32p((__u32 *)p);
57636 + return __swab32p((const __u32 *)p);
57637 }
57638 static inline __be16 __cpu_to_be16p(const __u16 *p)
57639 {
57640 - return (__force __be16)__swab16p(p);
57641 + return (__force const __be16)__swab16p(p);
57642 }
57643 static inline __u16 __be16_to_cpup(const __be16 *p)
57644 {
57645 - return __swab16p((__u16 *)p);
57646 + return __swab16p((const __u16 *)p);
57647 }
57648 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57649 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57650 diff --git a/include/linux/cache.h b/include/linux/cache.h
57651 index 4c57065..4307975 100644
57652 --- a/include/linux/cache.h
57653 +++ b/include/linux/cache.h
57654 @@ -16,6 +16,10 @@
57655 #define __read_mostly
57656 #endif
57657
57658 +#ifndef __read_only
57659 +#define __read_only __read_mostly
57660 +#endif
57661 +
57662 #ifndef ____cacheline_aligned
57663 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57664 #endif
57665 diff --git a/include/linux/capability.h b/include/linux/capability.h
57666 index a63d13d..069bfd5 100644
57667 --- a/include/linux/capability.h
57668 +++ b/include/linux/capability.h
57669 @@ -548,6 +548,9 @@ extern bool capable(int cap);
57670 extern bool ns_capable(struct user_namespace *ns, int cap);
57671 extern bool task_ns_capable(struct task_struct *t, int cap);
57672 extern bool nsown_capable(int cap);
57673 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57674 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57675 +extern bool capable_nolog(int cap);
57676
57677 /* audit system wants to get cap info from files as well */
57678 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57679 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57680 index 04ffb2e..6799180 100644
57681 --- a/include/linux/cleancache.h
57682 +++ b/include/linux/cleancache.h
57683 @@ -31,7 +31,7 @@ struct cleancache_ops {
57684 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57685 void (*flush_inode)(int, struct cleancache_filekey);
57686 void (*flush_fs)(int);
57687 -};
57688 +} __no_const;
57689
57690 extern struct cleancache_ops
57691 cleancache_register_ops(struct cleancache_ops *ops);
57692 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57693 index dfadc96..c0e70c1 100644
57694 --- a/include/linux/compiler-gcc4.h
57695 +++ b/include/linux/compiler-gcc4.h
57696 @@ -31,6 +31,12 @@
57697
57698
57699 #if __GNUC_MINOR__ >= 5
57700 +
57701 +#ifdef CONSTIFY_PLUGIN
57702 +#define __no_const __attribute__((no_const))
57703 +#define __do_const __attribute__((do_const))
57704 +#endif
57705 +
57706 /*
57707 * Mark a position in code as unreachable. This can be used to
57708 * suppress control flow warnings after asm blocks that transfer
57709 @@ -46,6 +52,11 @@
57710 #define __noclone __attribute__((__noclone__))
57711
57712 #endif
57713 +
57714 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57715 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57716 +#define __bos0(ptr) __bos((ptr), 0)
57717 +#define __bos1(ptr) __bos((ptr), 1)
57718 #endif
57719
57720 #if __GNUC_MINOR__ > 0
57721 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57722 index 320d6c9..8573a1c 100644
57723 --- a/include/linux/compiler.h
57724 +++ b/include/linux/compiler.h
57725 @@ -5,31 +5,62 @@
57726
57727 #ifdef __CHECKER__
57728 # define __user __attribute__((noderef, address_space(1)))
57729 +# define __force_user __force __user
57730 # define __kernel __attribute__((address_space(0)))
57731 +# define __force_kernel __force __kernel
57732 # define __safe __attribute__((safe))
57733 # define __force __attribute__((force))
57734 # define __nocast __attribute__((nocast))
57735 # define __iomem __attribute__((noderef, address_space(2)))
57736 +# define __force_iomem __force __iomem
57737 # define __acquires(x) __attribute__((context(x,0,1)))
57738 # define __releases(x) __attribute__((context(x,1,0)))
57739 # define __acquire(x) __context__(x,1)
57740 # define __release(x) __context__(x,-1)
57741 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57742 # define __percpu __attribute__((noderef, address_space(3)))
57743 +# define __force_percpu __force __percpu
57744 #ifdef CONFIG_SPARSE_RCU_POINTER
57745 # define __rcu __attribute__((noderef, address_space(4)))
57746 +# define __force_rcu __force __rcu
57747 #else
57748 # define __rcu
57749 +# define __force_rcu
57750 #endif
57751 extern void __chk_user_ptr(const volatile void __user *);
57752 extern void __chk_io_ptr(const volatile void __iomem *);
57753 +#elif defined(CHECKER_PLUGIN)
57754 +//# define __user
57755 +//# define __force_user
57756 +//# define __kernel
57757 +//# define __force_kernel
57758 +# define __safe
57759 +# define __force
57760 +# define __nocast
57761 +# define __iomem
57762 +# define __force_iomem
57763 +# define __chk_user_ptr(x) (void)0
57764 +# define __chk_io_ptr(x) (void)0
57765 +# define __builtin_warning(x, y...) (1)
57766 +# define __acquires(x)
57767 +# define __releases(x)
57768 +# define __acquire(x) (void)0
57769 +# define __release(x) (void)0
57770 +# define __cond_lock(x,c) (c)
57771 +# define __percpu
57772 +# define __force_percpu
57773 +# define __rcu
57774 +# define __force_rcu
57775 #else
57776 # define __user
57777 +# define __force_user
57778 # define __kernel
57779 +# define __force_kernel
57780 # define __safe
57781 # define __force
57782 # define __nocast
57783 # define __iomem
57784 +# define __force_iomem
57785 # define __chk_user_ptr(x) (void)0
57786 # define __chk_io_ptr(x) (void)0
57787 # define __builtin_warning(x, y...) (1)
57788 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57789 # define __release(x) (void)0
57790 # define __cond_lock(x,c) (c)
57791 # define __percpu
57792 +# define __force_percpu
57793 # define __rcu
57794 +# define __force_rcu
57795 #endif
57796
57797 #ifdef __KERNEL__
57798 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57799 # define __attribute_const__ /* unimplemented */
57800 #endif
57801
57802 +#ifndef __no_const
57803 +# define __no_const
57804 +#endif
57805 +
57806 +#ifndef __do_const
57807 +# define __do_const
57808 +#endif
57809 +
57810 /*
57811 * Tell gcc if a function is cold. The compiler will assume any path
57812 * directly leading to the call is unlikely.
57813 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57814 #define __cold
57815 #endif
57816
57817 +#ifndef __alloc_size
57818 +#define __alloc_size(...)
57819 +#endif
57820 +
57821 +#ifndef __bos
57822 +#define __bos(ptr, arg)
57823 +#endif
57824 +
57825 +#ifndef __bos0
57826 +#define __bos0(ptr)
57827 +#endif
57828 +
57829 +#ifndef __bos1
57830 +#define __bos1(ptr)
57831 +#endif
57832 +
57833 /* Simple shorthand for a section definition */
57834 #ifndef __section
57835 # define __section(S) __attribute__ ((__section__(#S)))
57836 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57837 * use is to mediate communication between process-level code and irq/NMI
57838 * handlers, all running on the same CPU.
57839 */
57840 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57841 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57842 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57843
57844 #endif /* __LINUX_COMPILER_H */
57845 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
57846 index e9eaec5..bfeb9bb 100644
57847 --- a/include/linux/cpuset.h
57848 +++ b/include/linux/cpuset.h
57849 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
57850 * nodemask.
57851 */
57852 smp_mb();
57853 - --ACCESS_ONCE(current->mems_allowed_change_disable);
57854 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57855 }
57856
57857 static inline void set_mems_allowed(nodemask_t nodemask)
57858 diff --git a/include/linux/cred.h b/include/linux/cred.h
57859 index 4030896..8d6f342 100644
57860 --- a/include/linux/cred.h
57861 +++ b/include/linux/cred.h
57862 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
57863 static inline void validate_process_creds(void)
57864 {
57865 }
57866 +static inline void validate_task_creds(struct task_struct *task)
57867 +{
57868 +}
57869 #endif
57870
57871 /**
57872 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
57873 index 8a94217..15d49e3 100644
57874 --- a/include/linux/crypto.h
57875 +++ b/include/linux/crypto.h
57876 @@ -365,7 +365,7 @@ struct cipher_tfm {
57877 const u8 *key, unsigned int keylen);
57878 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57879 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57880 -};
57881 +} __no_const;
57882
57883 struct hash_tfm {
57884 int (*init)(struct hash_desc *desc);
57885 @@ -386,13 +386,13 @@ struct compress_tfm {
57886 int (*cot_decompress)(struct crypto_tfm *tfm,
57887 const u8 *src, unsigned int slen,
57888 u8 *dst, unsigned int *dlen);
57889 -};
57890 +} __no_const;
57891
57892 struct rng_tfm {
57893 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57894 unsigned int dlen);
57895 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57896 -};
57897 +} __no_const;
57898
57899 #define crt_ablkcipher crt_u.ablkcipher
57900 #define crt_aead crt_u.aead
57901 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
57902 index 7925bf0..d5143d2 100644
57903 --- a/include/linux/decompress/mm.h
57904 +++ b/include/linux/decompress/mm.h
57905 @@ -77,7 +77,7 @@ static void free(void *where)
57906 * warnings when not needed (indeed large_malloc / large_free are not
57907 * needed by inflate */
57908
57909 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57910 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57911 #define free(a) kfree(a)
57912
57913 #define large_malloc(a) vmalloc(a)
57914 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
57915 index e13117c..e9fc938 100644
57916 --- a/include/linux/dma-mapping.h
57917 +++ b/include/linux/dma-mapping.h
57918 @@ -46,7 +46,7 @@ struct dma_map_ops {
57919 u64 (*get_required_mask)(struct device *dev);
57920 #endif
57921 int is_phys;
57922 -};
57923 +} __do_const;
57924
57925 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57926
57927 diff --git a/include/linux/efi.h b/include/linux/efi.h
57928 index 2362a0b..cfaf8fcc 100644
57929 --- a/include/linux/efi.h
57930 +++ b/include/linux/efi.h
57931 @@ -446,7 +446,7 @@ struct efivar_operations {
57932 efi_get_variable_t *get_variable;
57933 efi_get_next_variable_t *get_next_variable;
57934 efi_set_variable_t *set_variable;
57935 -};
57936 +} __no_const;
57937
57938 struct efivars {
57939 /*
57940 diff --git a/include/linux/elf.h b/include/linux/elf.h
57941 index 31f0508..5421c01 100644
57942 --- a/include/linux/elf.h
57943 +++ b/include/linux/elf.h
57944 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57945 #define PT_GNU_EH_FRAME 0x6474e550
57946
57947 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57948 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57949 +
57950 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57951 +
57952 +/* Constants for the e_flags field */
57953 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57954 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57955 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57956 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57957 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57958 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57959
57960 /*
57961 * Extended Numbering
57962 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57963 #define DT_DEBUG 21
57964 #define DT_TEXTREL 22
57965 #define DT_JMPREL 23
57966 +#define DT_FLAGS 30
57967 + #define DF_TEXTREL 0x00000004
57968 #define DT_ENCODING 32
57969 #define OLD_DT_LOOS 0x60000000
57970 #define DT_LOOS 0x6000000d
57971 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57972 #define PF_W 0x2
57973 #define PF_X 0x1
57974
57975 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57976 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57977 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57978 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57979 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57980 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57981 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57982 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57983 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57984 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57985 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57986 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57987 +
57988 typedef struct elf32_phdr{
57989 Elf32_Word p_type;
57990 Elf32_Off p_offset;
57991 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57992 #define EI_OSABI 7
57993 #define EI_PAD 8
57994
57995 +#define EI_PAX 14
57996 +
57997 #define ELFMAG0 0x7f /* EI_MAG */
57998 #define ELFMAG1 'E'
57999 #define ELFMAG2 'L'
58000 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58001 #define elf_note elf32_note
58002 #define elf_addr_t Elf32_Off
58003 #define Elf_Half Elf32_Half
58004 +#define elf_dyn Elf32_Dyn
58005
58006 #else
58007
58008 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58009 #define elf_note elf64_note
58010 #define elf_addr_t Elf64_Off
58011 #define Elf_Half Elf64_Half
58012 +#define elf_dyn Elf64_Dyn
58013
58014 #endif
58015
58016 diff --git a/include/linux/filter.h b/include/linux/filter.h
58017 index 8eeb205..d59bfa2 100644
58018 --- a/include/linux/filter.h
58019 +++ b/include/linux/filter.h
58020 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58021
58022 struct sk_buff;
58023 struct sock;
58024 +struct bpf_jit_work;
58025
58026 struct sk_filter
58027 {
58028 @@ -141,6 +142,9 @@ struct sk_filter
58029 unsigned int len; /* Number of filter blocks */
58030 unsigned int (*bpf_func)(const struct sk_buff *skb,
58031 const struct sock_filter *filter);
58032 +#ifdef CONFIG_BPF_JIT
58033 + struct bpf_jit_work *work;
58034 +#endif
58035 struct rcu_head rcu;
58036 struct sock_filter insns[0];
58037 };
58038 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58039 index 84ccf8e..2e9b14c 100644
58040 --- a/include/linux/firewire.h
58041 +++ b/include/linux/firewire.h
58042 @@ -428,7 +428,7 @@ struct fw_iso_context {
58043 union {
58044 fw_iso_callback_t sc;
58045 fw_iso_mc_callback_t mc;
58046 - } callback;
58047 + } __no_const callback;
58048 void *callback_data;
58049 };
58050
58051 diff --git a/include/linux/fs.h b/include/linux/fs.h
58052 index e0bc4ff..d79c2fa 100644
58053 --- a/include/linux/fs.h
58054 +++ b/include/linux/fs.h
58055 @@ -1608,7 +1608,8 @@ struct file_operations {
58056 int (*setlease)(struct file *, long, struct file_lock **);
58057 long (*fallocate)(struct file *file, int mode, loff_t offset,
58058 loff_t len);
58059 -};
58060 +} __do_const;
58061 +typedef struct file_operations __no_const file_operations_no_const;
58062
58063 struct inode_operations {
58064 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58065 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58066 index 003dc0f..3c4ea97 100644
58067 --- a/include/linux/fs_struct.h
58068 +++ b/include/linux/fs_struct.h
58069 @@ -6,7 +6,7 @@
58070 #include <linux/seqlock.h>
58071
58072 struct fs_struct {
58073 - int users;
58074 + atomic_t users;
58075 spinlock_t lock;
58076 seqcount_t seq;
58077 int umask;
58078 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58079 index ce31408..b1ad003 100644
58080 --- a/include/linux/fscache-cache.h
58081 +++ b/include/linux/fscache-cache.h
58082 @@ -102,7 +102,7 @@ struct fscache_operation {
58083 fscache_operation_release_t release;
58084 };
58085
58086 -extern atomic_t fscache_op_debug_id;
58087 +extern atomic_unchecked_t fscache_op_debug_id;
58088 extern void fscache_op_work_func(struct work_struct *work);
58089
58090 extern void fscache_enqueue_operation(struct fscache_operation *);
58091 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58092 {
58093 INIT_WORK(&op->work, fscache_op_work_func);
58094 atomic_set(&op->usage, 1);
58095 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58096 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58097 op->processor = processor;
58098 op->release = release;
58099 INIT_LIST_HEAD(&op->pend_link);
58100 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58101 index 2a53f10..0187fdf 100644
58102 --- a/include/linux/fsnotify.h
58103 +++ b/include/linux/fsnotify.h
58104 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58105 */
58106 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58107 {
58108 - return kstrdup(name, GFP_KERNEL);
58109 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58110 }
58111
58112 /*
58113 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58114 index 91d0e0a3..035666b 100644
58115 --- a/include/linux/fsnotify_backend.h
58116 +++ b/include/linux/fsnotify_backend.h
58117 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58118 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58119 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58120 };
58121 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58122
58123 /*
58124 * A group is a "thing" that wants to receive notification about filesystem
58125 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58126 index c3da42d..c70e0df 100644
58127 --- a/include/linux/ftrace_event.h
58128 +++ b/include/linux/ftrace_event.h
58129 @@ -97,7 +97,7 @@ struct trace_event_functions {
58130 trace_print_func raw;
58131 trace_print_func hex;
58132 trace_print_func binary;
58133 -};
58134 +} __no_const;
58135
58136 struct trace_event {
58137 struct hlist_node node;
58138 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58139 extern int trace_add_event_call(struct ftrace_event_call *call);
58140 extern void trace_remove_event_call(struct ftrace_event_call *call);
58141
58142 -#define is_signed_type(type) (((type)(-1)) < 0)
58143 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58144
58145 int trace_set_clr_event(const char *system, const char *event, int set);
58146
58147 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58148 index 6d18f35..ab71e2c 100644
58149 --- a/include/linux/genhd.h
58150 +++ b/include/linux/genhd.h
58151 @@ -185,7 +185,7 @@ struct gendisk {
58152 struct kobject *slave_dir;
58153
58154 struct timer_rand_state *random;
58155 - atomic_t sync_io; /* RAID */
58156 + atomic_unchecked_t sync_io; /* RAID */
58157 struct disk_events *ev;
58158 #ifdef CONFIG_BLK_DEV_INTEGRITY
58159 struct blk_integrity *integrity;
58160 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58161 new file mode 100644
58162 index 0000000..0dc3943
58163 --- /dev/null
58164 +++ b/include/linux/gracl.h
58165 @@ -0,0 +1,317 @@
58166 +#ifndef GR_ACL_H
58167 +#define GR_ACL_H
58168 +
58169 +#include <linux/grdefs.h>
58170 +#include <linux/resource.h>
58171 +#include <linux/capability.h>
58172 +#include <linux/dcache.h>
58173 +#include <asm/resource.h>
58174 +
58175 +/* Major status information */
58176 +
58177 +#define GR_VERSION "grsecurity 2.2.2"
58178 +#define GRSECURITY_VERSION 0x2202
58179 +
58180 +enum {
58181 + GR_SHUTDOWN = 0,
58182 + GR_ENABLE = 1,
58183 + GR_SPROLE = 2,
58184 + GR_RELOAD = 3,
58185 + GR_SEGVMOD = 4,
58186 + GR_STATUS = 5,
58187 + GR_UNSPROLE = 6,
58188 + GR_PASSSET = 7,
58189 + GR_SPROLEPAM = 8,
58190 +};
58191 +
58192 +/* Password setup definitions
58193 + * kernel/grhash.c */
58194 +enum {
58195 + GR_PW_LEN = 128,
58196 + GR_SALT_LEN = 16,
58197 + GR_SHA_LEN = 32,
58198 +};
58199 +
58200 +enum {
58201 + GR_SPROLE_LEN = 64,
58202 +};
58203 +
58204 +enum {
58205 + GR_NO_GLOB = 0,
58206 + GR_REG_GLOB,
58207 + GR_CREATE_GLOB
58208 +};
58209 +
58210 +#define GR_NLIMITS 32
58211 +
58212 +/* Begin Data Structures */
58213 +
58214 +struct sprole_pw {
58215 + unsigned char *rolename;
58216 + unsigned char salt[GR_SALT_LEN];
58217 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58218 +};
58219 +
58220 +struct name_entry {
58221 + __u32 key;
58222 + ino_t inode;
58223 + dev_t device;
58224 + char *name;
58225 + __u16 len;
58226 + __u8 deleted;
58227 + struct name_entry *prev;
58228 + struct name_entry *next;
58229 +};
58230 +
58231 +struct inodev_entry {
58232 + struct name_entry *nentry;
58233 + struct inodev_entry *prev;
58234 + struct inodev_entry *next;
58235 +};
58236 +
58237 +struct acl_role_db {
58238 + struct acl_role_label **r_hash;
58239 + __u32 r_size;
58240 +};
58241 +
58242 +struct inodev_db {
58243 + struct inodev_entry **i_hash;
58244 + __u32 i_size;
58245 +};
58246 +
58247 +struct name_db {
58248 + struct name_entry **n_hash;
58249 + __u32 n_size;
58250 +};
58251 +
58252 +struct crash_uid {
58253 + uid_t uid;
58254 + unsigned long expires;
58255 +};
58256 +
58257 +struct gr_hash_struct {
58258 + void **table;
58259 + void **nametable;
58260 + void *first;
58261 + __u32 table_size;
58262 + __u32 used_size;
58263 + int type;
58264 +};
58265 +
58266 +/* Userspace Grsecurity ACL data structures */
58267 +
58268 +struct acl_subject_label {
58269 + char *filename;
58270 + ino_t inode;
58271 + dev_t device;
58272 + __u32 mode;
58273 + kernel_cap_t cap_mask;
58274 + kernel_cap_t cap_lower;
58275 + kernel_cap_t cap_invert_audit;
58276 +
58277 + struct rlimit res[GR_NLIMITS];
58278 + __u32 resmask;
58279 +
58280 + __u8 user_trans_type;
58281 + __u8 group_trans_type;
58282 + uid_t *user_transitions;
58283 + gid_t *group_transitions;
58284 + __u16 user_trans_num;
58285 + __u16 group_trans_num;
58286 +
58287 + __u32 sock_families[2];
58288 + __u32 ip_proto[8];
58289 + __u32 ip_type;
58290 + struct acl_ip_label **ips;
58291 + __u32 ip_num;
58292 + __u32 inaddr_any_override;
58293 +
58294 + __u32 crashes;
58295 + unsigned long expires;
58296 +
58297 + struct acl_subject_label *parent_subject;
58298 + struct gr_hash_struct *hash;
58299 + struct acl_subject_label *prev;
58300 + struct acl_subject_label *next;
58301 +
58302 + struct acl_object_label **obj_hash;
58303 + __u32 obj_hash_size;
58304 + __u16 pax_flags;
58305 +};
58306 +
58307 +struct role_allowed_ip {
58308 + __u32 addr;
58309 + __u32 netmask;
58310 +
58311 + struct role_allowed_ip *prev;
58312 + struct role_allowed_ip *next;
58313 +};
58314 +
58315 +struct role_transition {
58316 + char *rolename;
58317 +
58318 + struct role_transition *prev;
58319 + struct role_transition *next;
58320 +};
58321 +
58322 +struct acl_role_label {
58323 + char *rolename;
58324 + uid_t uidgid;
58325 + __u16 roletype;
58326 +
58327 + __u16 auth_attempts;
58328 + unsigned long expires;
58329 +
58330 + struct acl_subject_label *root_label;
58331 + struct gr_hash_struct *hash;
58332 +
58333 + struct acl_role_label *prev;
58334 + struct acl_role_label *next;
58335 +
58336 + struct role_transition *transitions;
58337 + struct role_allowed_ip *allowed_ips;
58338 + uid_t *domain_children;
58339 + __u16 domain_child_num;
58340 +
58341 + struct acl_subject_label **subj_hash;
58342 + __u32 subj_hash_size;
58343 +};
58344 +
58345 +struct user_acl_role_db {
58346 + struct acl_role_label **r_table;
58347 + __u32 num_pointers; /* Number of allocations to track */
58348 + __u32 num_roles; /* Number of roles */
58349 + __u32 num_domain_children; /* Number of domain children */
58350 + __u32 num_subjects; /* Number of subjects */
58351 + __u32 num_objects; /* Number of objects */
58352 +};
58353 +
58354 +struct acl_object_label {
58355 + char *filename;
58356 + ino_t inode;
58357 + dev_t device;
58358 + __u32 mode;
58359 +
58360 + struct acl_subject_label *nested;
58361 + struct acl_object_label *globbed;
58362 +
58363 + /* next two structures not used */
58364 +
58365 + struct acl_object_label *prev;
58366 + struct acl_object_label *next;
58367 +};
58368 +
58369 +struct acl_ip_label {
58370 + char *iface;
58371 + __u32 addr;
58372 + __u32 netmask;
58373 + __u16 low, high;
58374 + __u8 mode;
58375 + __u32 type;
58376 + __u32 proto[8];
58377 +
58378 + /* next two structures not used */
58379 +
58380 + struct acl_ip_label *prev;
58381 + struct acl_ip_label *next;
58382 +};
58383 +
58384 +struct gr_arg {
58385 + struct user_acl_role_db role_db;
58386 + unsigned char pw[GR_PW_LEN];
58387 + unsigned char salt[GR_SALT_LEN];
58388 + unsigned char sum[GR_SHA_LEN];
58389 + unsigned char sp_role[GR_SPROLE_LEN];
58390 + struct sprole_pw *sprole_pws;
58391 + dev_t segv_device;
58392 + ino_t segv_inode;
58393 + uid_t segv_uid;
58394 + __u16 num_sprole_pws;
58395 + __u16 mode;
58396 +};
58397 +
58398 +struct gr_arg_wrapper {
58399 + struct gr_arg *arg;
58400 + __u32 version;
58401 + __u32 size;
58402 +};
58403 +
58404 +struct subject_map {
58405 + struct acl_subject_label *user;
58406 + struct acl_subject_label *kernel;
58407 + struct subject_map *prev;
58408 + struct subject_map *next;
58409 +};
58410 +
58411 +struct acl_subj_map_db {
58412 + struct subject_map **s_hash;
58413 + __u32 s_size;
58414 +};
58415 +
58416 +/* End Data Structures Section */
58417 +
58418 +/* Hash functions generated by empirical testing by Brad Spengler
58419 + Makes good use of the low bits of the inode. Generally 0-1 times
58420 + in loop for successful match. 0-3 for unsuccessful match.
58421 + Shift/add algorithm with modulus of table size and an XOR*/
58422 +
58423 +static __inline__ unsigned int
58424 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58425 +{
58426 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58427 +}
58428 +
58429 + static __inline__ unsigned int
58430 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58431 +{
58432 + return ((const unsigned long)userp % sz);
58433 +}
58434 +
58435 +static __inline__ unsigned int
58436 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58437 +{
58438 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58439 +}
58440 +
58441 +static __inline__ unsigned int
58442 +nhash(const char *name, const __u16 len, const unsigned int sz)
58443 +{
58444 + return full_name_hash((const unsigned char *)name, len) % sz;
58445 +}
58446 +
58447 +#define FOR_EACH_ROLE_START(role) \
58448 + role = role_list; \
58449 + while (role) {
58450 +
58451 +#define FOR_EACH_ROLE_END(role) \
58452 + role = role->prev; \
58453 + }
58454 +
58455 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58456 + subj = NULL; \
58457 + iter = 0; \
58458 + while (iter < role->subj_hash_size) { \
58459 + if (subj == NULL) \
58460 + subj = role->subj_hash[iter]; \
58461 + if (subj == NULL) { \
58462 + iter++; \
58463 + continue; \
58464 + }
58465 +
58466 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58467 + subj = subj->next; \
58468 + if (subj == NULL) \
58469 + iter++; \
58470 + }
58471 +
58472 +
58473 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58474 + subj = role->hash->first; \
58475 + while (subj != NULL) {
58476 +
58477 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58478 + subj = subj->next; \
58479 + }
58480 +
58481 +#endif
58482 +
58483 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58484 new file mode 100644
58485 index 0000000..323ecf2
58486 --- /dev/null
58487 +++ b/include/linux/gralloc.h
58488 @@ -0,0 +1,9 @@
58489 +#ifndef __GRALLOC_H
58490 +#define __GRALLOC_H
58491 +
58492 +void acl_free_all(void);
58493 +int acl_alloc_stack_init(unsigned long size);
58494 +void *acl_alloc(unsigned long len);
58495 +void *acl_alloc_num(unsigned long num, unsigned long len);
58496 +
58497 +#endif
58498 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58499 new file mode 100644
58500 index 0000000..b30e9bc
58501 --- /dev/null
58502 +++ b/include/linux/grdefs.h
58503 @@ -0,0 +1,140 @@
58504 +#ifndef GRDEFS_H
58505 +#define GRDEFS_H
58506 +
58507 +/* Begin grsecurity status declarations */
58508 +
58509 +enum {
58510 + GR_READY = 0x01,
58511 + GR_STATUS_INIT = 0x00 // disabled state
58512 +};
58513 +
58514 +/* Begin ACL declarations */
58515 +
58516 +/* Role flags */
58517 +
58518 +enum {
58519 + GR_ROLE_USER = 0x0001,
58520 + GR_ROLE_GROUP = 0x0002,
58521 + GR_ROLE_DEFAULT = 0x0004,
58522 + GR_ROLE_SPECIAL = 0x0008,
58523 + GR_ROLE_AUTH = 0x0010,
58524 + GR_ROLE_NOPW = 0x0020,
58525 + GR_ROLE_GOD = 0x0040,
58526 + GR_ROLE_LEARN = 0x0080,
58527 + GR_ROLE_TPE = 0x0100,
58528 + GR_ROLE_DOMAIN = 0x0200,
58529 + GR_ROLE_PAM = 0x0400,
58530 + GR_ROLE_PERSIST = 0x0800
58531 +};
58532 +
58533 +/* ACL Subject and Object mode flags */
58534 +enum {
58535 + GR_DELETED = 0x80000000
58536 +};
58537 +
58538 +/* ACL Object-only mode flags */
58539 +enum {
58540 + GR_READ = 0x00000001,
58541 + GR_APPEND = 0x00000002,
58542 + GR_WRITE = 0x00000004,
58543 + GR_EXEC = 0x00000008,
58544 + GR_FIND = 0x00000010,
58545 + GR_INHERIT = 0x00000020,
58546 + GR_SETID = 0x00000040,
58547 + GR_CREATE = 0x00000080,
58548 + GR_DELETE = 0x00000100,
58549 + GR_LINK = 0x00000200,
58550 + GR_AUDIT_READ = 0x00000400,
58551 + GR_AUDIT_APPEND = 0x00000800,
58552 + GR_AUDIT_WRITE = 0x00001000,
58553 + GR_AUDIT_EXEC = 0x00002000,
58554 + GR_AUDIT_FIND = 0x00004000,
58555 + GR_AUDIT_INHERIT= 0x00008000,
58556 + GR_AUDIT_SETID = 0x00010000,
58557 + GR_AUDIT_CREATE = 0x00020000,
58558 + GR_AUDIT_DELETE = 0x00040000,
58559 + GR_AUDIT_LINK = 0x00080000,
58560 + GR_PTRACERD = 0x00100000,
58561 + GR_NOPTRACE = 0x00200000,
58562 + GR_SUPPRESS = 0x00400000,
58563 + GR_NOLEARN = 0x00800000,
58564 + GR_INIT_TRANSFER= 0x01000000
58565 +};
58566 +
58567 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58568 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58569 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58570 +
58571 +/* ACL subject-only mode flags */
58572 +enum {
58573 + GR_KILL = 0x00000001,
58574 + GR_VIEW = 0x00000002,
58575 + GR_PROTECTED = 0x00000004,
58576 + GR_LEARN = 0x00000008,
58577 + GR_OVERRIDE = 0x00000010,
58578 + /* just a placeholder, this mode is only used in userspace */
58579 + GR_DUMMY = 0x00000020,
58580 + GR_PROTSHM = 0x00000040,
58581 + GR_KILLPROC = 0x00000080,
58582 + GR_KILLIPPROC = 0x00000100,
58583 + /* just a placeholder, this mode is only used in userspace */
58584 + GR_NOTROJAN = 0x00000200,
58585 + GR_PROTPROCFD = 0x00000400,
58586 + GR_PROCACCT = 0x00000800,
58587 + GR_RELAXPTRACE = 0x00001000,
58588 + GR_NESTED = 0x00002000,
58589 + GR_INHERITLEARN = 0x00004000,
58590 + GR_PROCFIND = 0x00008000,
58591 + GR_POVERRIDE = 0x00010000,
58592 + GR_KERNELAUTH = 0x00020000,
58593 + GR_ATSECURE = 0x00040000,
58594 + GR_SHMEXEC = 0x00080000
58595 +};
58596 +
58597 +enum {
58598 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58599 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58600 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58601 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58602 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58603 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58604 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58605 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58606 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58607 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58608 +};
58609 +
58610 +enum {
58611 + GR_ID_USER = 0x01,
58612 + GR_ID_GROUP = 0x02,
58613 +};
58614 +
58615 +enum {
58616 + GR_ID_ALLOW = 0x01,
58617 + GR_ID_DENY = 0x02,
58618 +};
58619 +
58620 +#define GR_CRASH_RES 31
58621 +#define GR_UIDTABLE_MAX 500
58622 +
58623 +/* begin resource learning section */
58624 +enum {
58625 + GR_RLIM_CPU_BUMP = 60,
58626 + GR_RLIM_FSIZE_BUMP = 50000,
58627 + GR_RLIM_DATA_BUMP = 10000,
58628 + GR_RLIM_STACK_BUMP = 1000,
58629 + GR_RLIM_CORE_BUMP = 10000,
58630 + GR_RLIM_RSS_BUMP = 500000,
58631 + GR_RLIM_NPROC_BUMP = 1,
58632 + GR_RLIM_NOFILE_BUMP = 5,
58633 + GR_RLIM_MEMLOCK_BUMP = 50000,
58634 + GR_RLIM_AS_BUMP = 500000,
58635 + GR_RLIM_LOCKS_BUMP = 2,
58636 + GR_RLIM_SIGPENDING_BUMP = 5,
58637 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58638 + GR_RLIM_NICE_BUMP = 1,
58639 + GR_RLIM_RTPRIO_BUMP = 1,
58640 + GR_RLIM_RTTIME_BUMP = 1000000
58641 +};
58642 +
58643 +#endif
58644 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58645 new file mode 100644
58646 index 0000000..da390f1
58647 --- /dev/null
58648 +++ b/include/linux/grinternal.h
58649 @@ -0,0 +1,221 @@
58650 +#ifndef __GRINTERNAL_H
58651 +#define __GRINTERNAL_H
58652 +
58653 +#ifdef CONFIG_GRKERNSEC
58654 +
58655 +#include <linux/fs.h>
58656 +#include <linux/mnt_namespace.h>
58657 +#include <linux/nsproxy.h>
58658 +#include <linux/gracl.h>
58659 +#include <linux/grdefs.h>
58660 +#include <linux/grmsg.h>
58661 +
58662 +void gr_add_learn_entry(const char *fmt, ...)
58663 + __attribute__ ((format (printf, 1, 2)));
58664 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58665 + const struct vfsmount *mnt);
58666 +__u32 gr_check_create(const struct dentry *new_dentry,
58667 + const struct dentry *parent,
58668 + const struct vfsmount *mnt, const __u32 mode);
58669 +int gr_check_protected_task(const struct task_struct *task);
58670 +__u32 to_gr_audit(const __u32 reqmode);
58671 +int gr_set_acls(const int type);
58672 +int gr_apply_subject_to_task(struct task_struct *task);
58673 +int gr_acl_is_enabled(void);
58674 +char gr_roletype_to_char(void);
58675 +
58676 +void gr_handle_alertkill(struct task_struct *task);
58677 +char *gr_to_filename(const struct dentry *dentry,
58678 + const struct vfsmount *mnt);
58679 +char *gr_to_filename1(const struct dentry *dentry,
58680 + const struct vfsmount *mnt);
58681 +char *gr_to_filename2(const struct dentry *dentry,
58682 + const struct vfsmount *mnt);
58683 +char *gr_to_filename3(const struct dentry *dentry,
58684 + const struct vfsmount *mnt);
58685 +
58686 +extern int grsec_enable_ptrace_readexec;
58687 +extern int grsec_enable_harden_ptrace;
58688 +extern int grsec_enable_link;
58689 +extern int grsec_enable_fifo;
58690 +extern int grsec_enable_execve;
58691 +extern int grsec_enable_shm;
58692 +extern int grsec_enable_execlog;
58693 +extern int grsec_enable_signal;
58694 +extern int grsec_enable_audit_ptrace;
58695 +extern int grsec_enable_forkfail;
58696 +extern int grsec_enable_time;
58697 +extern int grsec_enable_rofs;
58698 +extern int grsec_enable_chroot_shmat;
58699 +extern int grsec_enable_chroot_mount;
58700 +extern int grsec_enable_chroot_double;
58701 +extern int grsec_enable_chroot_pivot;
58702 +extern int grsec_enable_chroot_chdir;
58703 +extern int grsec_enable_chroot_chmod;
58704 +extern int grsec_enable_chroot_mknod;
58705 +extern int grsec_enable_chroot_fchdir;
58706 +extern int grsec_enable_chroot_nice;
58707 +extern int grsec_enable_chroot_execlog;
58708 +extern int grsec_enable_chroot_caps;
58709 +extern int grsec_enable_chroot_sysctl;
58710 +extern int grsec_enable_chroot_unix;
58711 +extern int grsec_enable_tpe;
58712 +extern int grsec_tpe_gid;
58713 +extern int grsec_enable_tpe_all;
58714 +extern int grsec_enable_tpe_invert;
58715 +extern int grsec_enable_socket_all;
58716 +extern int grsec_socket_all_gid;
58717 +extern int grsec_enable_socket_client;
58718 +extern int grsec_socket_client_gid;
58719 +extern int grsec_enable_socket_server;
58720 +extern int grsec_socket_server_gid;
58721 +extern int grsec_audit_gid;
58722 +extern int grsec_enable_group;
58723 +extern int grsec_enable_audit_textrel;
58724 +extern int grsec_enable_log_rwxmaps;
58725 +extern int grsec_enable_mount;
58726 +extern int grsec_enable_chdir;
58727 +extern int grsec_resource_logging;
58728 +extern int grsec_enable_blackhole;
58729 +extern int grsec_lastack_retries;
58730 +extern int grsec_enable_brute;
58731 +extern int grsec_lock;
58732 +
58733 +extern spinlock_t grsec_alert_lock;
58734 +extern unsigned long grsec_alert_wtime;
58735 +extern unsigned long grsec_alert_fyet;
58736 +
58737 +extern spinlock_t grsec_audit_lock;
58738 +
58739 +extern rwlock_t grsec_exec_file_lock;
58740 +
58741 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58742 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58743 + (tsk)->exec_file->f_vfsmnt) : "/")
58744 +
58745 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58746 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58747 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58748 +
58749 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58750 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58751 + (tsk)->exec_file->f_vfsmnt) : "/")
58752 +
58753 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58754 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58755 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58756 +
58757 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58758 +
58759 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58760 +
58761 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58762 + (task)->pid, (cred)->uid, \
58763 + (cred)->euid, (cred)->gid, (cred)->egid, \
58764 + gr_parent_task_fullpath(task), \
58765 + (task)->real_parent->comm, (task)->real_parent->pid, \
58766 + (pcred)->uid, (pcred)->euid, \
58767 + (pcred)->gid, (pcred)->egid
58768 +
58769 +#define GR_CHROOT_CAPS {{ \
58770 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58771 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58772 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58773 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58774 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58775 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58776 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58777 +
58778 +#define security_learn(normal_msg,args...) \
58779 +({ \
58780 + read_lock(&grsec_exec_file_lock); \
58781 + gr_add_learn_entry(normal_msg "\n", ## args); \
58782 + read_unlock(&grsec_exec_file_lock); \
58783 +})
58784 +
58785 +enum {
58786 + GR_DO_AUDIT,
58787 + GR_DONT_AUDIT,
58788 + /* used for non-audit messages that we shouldn't kill the task on */
58789 + GR_DONT_AUDIT_GOOD
58790 +};
58791 +
58792 +enum {
58793 + GR_TTYSNIFF,
58794 + GR_RBAC,
58795 + GR_RBAC_STR,
58796 + GR_STR_RBAC,
58797 + GR_RBAC_MODE2,
58798 + GR_RBAC_MODE3,
58799 + GR_FILENAME,
58800 + GR_SYSCTL_HIDDEN,
58801 + GR_NOARGS,
58802 + GR_ONE_INT,
58803 + GR_ONE_INT_TWO_STR,
58804 + GR_ONE_STR,
58805 + GR_STR_INT,
58806 + GR_TWO_STR_INT,
58807 + GR_TWO_INT,
58808 + GR_TWO_U64,
58809 + GR_THREE_INT,
58810 + GR_FIVE_INT_TWO_STR,
58811 + GR_TWO_STR,
58812 + GR_THREE_STR,
58813 + GR_FOUR_STR,
58814 + GR_STR_FILENAME,
58815 + GR_FILENAME_STR,
58816 + GR_FILENAME_TWO_INT,
58817 + GR_FILENAME_TWO_INT_STR,
58818 + GR_TEXTREL,
58819 + GR_PTRACE,
58820 + GR_RESOURCE,
58821 + GR_CAP,
58822 + GR_SIG,
58823 + GR_SIG2,
58824 + GR_CRASH1,
58825 + GR_CRASH2,
58826 + GR_PSACCT,
58827 + GR_RWXMAP
58828 +};
58829 +
58830 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58831 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58832 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58833 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58834 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58835 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58836 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58837 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58838 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58839 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58840 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58841 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58842 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58843 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58844 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58845 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58846 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58847 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58848 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58849 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58850 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58851 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58852 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58853 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58854 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58855 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58856 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58857 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58858 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58859 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58860 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58861 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58862 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58863 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58864 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58865 +
58866 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58867 +
58868 +#endif
58869 +
58870 +#endif
58871 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
58872 new file mode 100644
58873 index 0000000..b3347e2
58874 --- /dev/null
58875 +++ b/include/linux/grmsg.h
58876 @@ -0,0 +1,109 @@
58877 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58878 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58879 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58880 +#define GR_STOPMOD_MSG "denied modification of module state by "
58881 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58882 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58883 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58884 +#define GR_IOPL_MSG "denied use of iopl() by "
58885 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58886 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58887 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58888 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58889 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58890 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58891 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58892 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58893 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58894 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58895 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58896 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58897 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58898 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58899 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58900 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58901 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58902 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58903 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58904 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58905 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58906 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58907 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58908 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58909 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58910 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58911 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.64s) of %.950s by "
58912 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58913 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58914 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58915 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58916 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58917 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58918 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58919 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58920 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58921 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58922 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58923 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58924 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58925 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58926 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58927 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58928 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58929 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58930 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58931 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58932 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58933 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58934 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58935 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58936 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58937 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58938 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58939 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58940 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58941 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58942 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58943 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58944 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58945 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58946 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58947 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58948 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58949 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58950 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58951 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58952 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58953 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58954 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58955 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58956 +#define GR_TIME_MSG "time set by "
58957 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58958 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58959 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58960 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58961 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58962 +#define GR_BIND_MSG "denied bind() by "
58963 +#define GR_CONNECT_MSG "denied connect() by "
58964 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58965 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58966 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58967 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58968 +#define GR_CAP_ACL_MSG "use of %s denied for "
58969 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58970 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58971 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58972 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58973 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58974 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58975 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58976 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58977 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58978 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58979 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58980 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58981 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58982 +#define GR_VM86_MSG "denied use of vm86 by "
58983 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58984 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
58985 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58986 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
58987 new file mode 100644
58988 index 0000000..eb4885f
58989 --- /dev/null
58990 +++ b/include/linux/grsecurity.h
58991 @@ -0,0 +1,233 @@
58992 +#ifndef GR_SECURITY_H
58993 +#define GR_SECURITY_H
58994 +#include <linux/fs.h>
58995 +#include <linux/fs_struct.h>
58996 +#include <linux/binfmts.h>
58997 +#include <linux/gracl.h>
58998 +
58999 +/* notify of brain-dead configs */
59000 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59001 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59002 +#endif
59003 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59004 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59005 +#endif
59006 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59007 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59008 +#endif
59009 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59010 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59011 +#endif
59012 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59013 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59014 +#endif
59015 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59016 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59017 +#endif
59018 +
59019 +#include <linux/compat.h>
59020 +
59021 +struct user_arg_ptr {
59022 +#ifdef CONFIG_COMPAT
59023 + bool is_compat;
59024 +#endif
59025 + union {
59026 + const char __user *const __user *native;
59027 +#ifdef CONFIG_COMPAT
59028 + compat_uptr_t __user *compat;
59029 +#endif
59030 + } ptr;
59031 +};
59032 +
59033 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59034 +void gr_handle_brute_check(void);
59035 +void gr_handle_kernel_exploit(void);
59036 +int gr_process_user_ban(void);
59037 +
59038 +char gr_roletype_to_char(void);
59039 +
59040 +int gr_acl_enable_at_secure(void);
59041 +
59042 +int gr_check_user_change(int real, int effective, int fs);
59043 +int gr_check_group_change(int real, int effective, int fs);
59044 +
59045 +void gr_del_task_from_ip_table(struct task_struct *p);
59046 +
59047 +int gr_pid_is_chrooted(struct task_struct *p);
59048 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59049 +int gr_handle_chroot_nice(void);
59050 +int gr_handle_chroot_sysctl(const int op);
59051 +int gr_handle_chroot_setpriority(struct task_struct *p,
59052 + const int niceval);
59053 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59054 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59055 + const struct vfsmount *mnt);
59056 +void gr_handle_chroot_chdir(struct path *path);
59057 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59058 + const struct vfsmount *mnt, const int mode);
59059 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59060 + const struct vfsmount *mnt, const int mode);
59061 +int gr_handle_chroot_mount(const struct dentry *dentry,
59062 + const struct vfsmount *mnt,
59063 + const char *dev_name);
59064 +int gr_handle_chroot_pivot(void);
59065 +int gr_handle_chroot_unix(const pid_t pid);
59066 +
59067 +int gr_handle_rawio(const struct inode *inode);
59068 +
59069 +void gr_handle_ioperm(void);
59070 +void gr_handle_iopl(void);
59071 +
59072 +int gr_tpe_allow(const struct file *file);
59073 +
59074 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59075 +void gr_clear_chroot_entries(struct task_struct *task);
59076 +
59077 +void gr_log_forkfail(const int retval);
59078 +void gr_log_timechange(void);
59079 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59080 +void gr_log_chdir(const struct dentry *dentry,
59081 + const struct vfsmount *mnt);
59082 +void gr_log_chroot_exec(const struct dentry *dentry,
59083 + const struct vfsmount *mnt);
59084 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59085 +void gr_log_remount(const char *devname, const int retval);
59086 +void gr_log_unmount(const char *devname, const int retval);
59087 +void gr_log_mount(const char *from, const char *to, const int retval);
59088 +void gr_log_textrel(struct vm_area_struct *vma);
59089 +void gr_log_rwxmmap(struct file *file);
59090 +void gr_log_rwxmprotect(struct file *file);
59091 +
59092 +int gr_handle_follow_link(const struct inode *parent,
59093 + const struct inode *inode,
59094 + const struct dentry *dentry,
59095 + const struct vfsmount *mnt);
59096 +int gr_handle_fifo(const struct dentry *dentry,
59097 + const struct vfsmount *mnt,
59098 + const struct dentry *dir, const int flag,
59099 + const int acc_mode);
59100 +int gr_handle_hardlink(const struct dentry *dentry,
59101 + const struct vfsmount *mnt,
59102 + struct inode *inode,
59103 + const int mode, const char *to);
59104 +
59105 +int gr_is_capable(const int cap);
59106 +int gr_is_capable_nolog(const int cap);
59107 +void gr_learn_resource(const struct task_struct *task, const int limit,
59108 + const unsigned long wanted, const int gt);
59109 +void gr_copy_label(struct task_struct *tsk);
59110 +void gr_handle_crash(struct task_struct *task, const int sig);
59111 +int gr_handle_signal(const struct task_struct *p, const int sig);
59112 +int gr_check_crash_uid(const uid_t uid);
59113 +int gr_check_protected_task(const struct task_struct *task);
59114 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59115 +int gr_acl_handle_mmap(const struct file *file,
59116 + const unsigned long prot);
59117 +int gr_acl_handle_mprotect(const struct file *file,
59118 + const unsigned long prot);
59119 +int gr_check_hidden_task(const struct task_struct *tsk);
59120 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59121 + const struct vfsmount *mnt);
59122 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59123 + const struct vfsmount *mnt);
59124 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59125 + const struct vfsmount *mnt, const int fmode);
59126 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59127 + const struct vfsmount *mnt, mode_t mode);
59128 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59129 + const struct vfsmount *mnt, mode_t mode);
59130 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59131 + const struct vfsmount *mnt);
59132 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59133 + const struct vfsmount *mnt);
59134 +int gr_handle_ptrace(struct task_struct *task, const long request);
59135 +int gr_handle_proc_ptrace(struct task_struct *task);
59136 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59137 + const struct vfsmount *mnt);
59138 +int gr_check_crash_exec(const struct file *filp);
59139 +int gr_acl_is_enabled(void);
59140 +void gr_set_kernel_label(struct task_struct *task);
59141 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59142 + const gid_t gid);
59143 +int gr_set_proc_label(const struct dentry *dentry,
59144 + const struct vfsmount *mnt,
59145 + const int unsafe_flags);
59146 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59147 + const struct vfsmount *mnt);
59148 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59149 + const struct vfsmount *mnt, int acc_mode);
59150 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59151 + const struct dentry *p_dentry,
59152 + const struct vfsmount *p_mnt,
59153 + int open_flags, int acc_mode, const int imode);
59154 +void gr_handle_create(const struct dentry *dentry,
59155 + const struct vfsmount *mnt);
59156 +void gr_handle_proc_create(const struct dentry *dentry,
59157 + const struct inode *inode);
59158 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59159 + const struct dentry *parent_dentry,
59160 + const struct vfsmount *parent_mnt,
59161 + const int mode);
59162 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59163 + const struct dentry *parent_dentry,
59164 + const struct vfsmount *parent_mnt);
59165 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59166 + const struct vfsmount *mnt);
59167 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59168 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59169 + const struct vfsmount *mnt);
59170 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59171 + const struct dentry *parent_dentry,
59172 + const struct vfsmount *parent_mnt,
59173 + const char *from);
59174 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59175 + const struct dentry *parent_dentry,
59176 + const struct vfsmount *parent_mnt,
59177 + const struct dentry *old_dentry,
59178 + const struct vfsmount *old_mnt, const char *to);
59179 +int gr_acl_handle_rename(struct dentry *new_dentry,
59180 + struct dentry *parent_dentry,
59181 + const struct vfsmount *parent_mnt,
59182 + struct dentry *old_dentry,
59183 + struct inode *old_parent_inode,
59184 + struct vfsmount *old_mnt, const char *newname);
59185 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59186 + struct dentry *old_dentry,
59187 + struct dentry *new_dentry,
59188 + struct vfsmount *mnt, const __u8 replace);
59189 +__u32 gr_check_link(const struct dentry *new_dentry,
59190 + const struct dentry *parent_dentry,
59191 + const struct vfsmount *parent_mnt,
59192 + const struct dentry *old_dentry,
59193 + const struct vfsmount *old_mnt);
59194 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59195 + const unsigned int namelen, const ino_t ino);
59196 +
59197 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59198 + const struct vfsmount *mnt);
59199 +void gr_acl_handle_exit(void);
59200 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59201 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59202 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59203 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59204 +void gr_audit_ptrace(struct task_struct *task);
59205 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59206 +
59207 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59208 +
59209 +#ifdef CONFIG_GRKERNSEC
59210 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59211 +void gr_handle_vm86(void);
59212 +void gr_handle_mem_readwrite(u64 from, u64 to);
59213 +
59214 +extern int grsec_enable_dmesg;
59215 +extern int grsec_disable_privio;
59216 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59217 +extern int grsec_enable_chroot_findtask;
59218 +#endif
59219 +#ifdef CONFIG_GRKERNSEC_SETXID
59220 +extern int grsec_enable_setxid;
59221 +#endif
59222 +#endif
59223 +
59224 +#endif
59225 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59226 new file mode 100644
59227 index 0000000..e7ffaaf
59228 --- /dev/null
59229 +++ b/include/linux/grsock.h
59230 @@ -0,0 +1,19 @@
59231 +#ifndef __GRSOCK_H
59232 +#define __GRSOCK_H
59233 +
59234 +extern void gr_attach_curr_ip(const struct sock *sk);
59235 +extern int gr_handle_sock_all(const int family, const int type,
59236 + const int protocol);
59237 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59238 +extern int gr_handle_sock_server_other(const struct sock *sck);
59239 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59240 +extern int gr_search_connect(struct socket * sock,
59241 + struct sockaddr_in * addr);
59242 +extern int gr_search_bind(struct socket * sock,
59243 + struct sockaddr_in * addr);
59244 +extern int gr_search_listen(struct socket * sock);
59245 +extern int gr_search_accept(struct socket * sock);
59246 +extern int gr_search_socket(const int domain, const int type,
59247 + const int protocol);
59248 +
59249 +#endif
59250 diff --git a/include/linux/hid.h b/include/linux/hid.h
59251 index c235e4e..f0cf7a0 100644
59252 --- a/include/linux/hid.h
59253 +++ b/include/linux/hid.h
59254 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59255 unsigned int code, int value);
59256
59257 int (*parse)(struct hid_device *hdev);
59258 -};
59259 +} __no_const;
59260
59261 #define PM_HINT_FULLON 1<<5
59262 #define PM_HINT_NORMAL 1<<1
59263 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59264 index 3a93f73..b19d0b3 100644
59265 --- a/include/linux/highmem.h
59266 +++ b/include/linux/highmem.h
59267 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59268 kunmap_atomic(kaddr, KM_USER0);
59269 }
59270
59271 +static inline void sanitize_highpage(struct page *page)
59272 +{
59273 + void *kaddr;
59274 + unsigned long flags;
59275 +
59276 + local_irq_save(flags);
59277 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59278 + clear_page(kaddr);
59279 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59280 + local_irq_restore(flags);
59281 +}
59282 +
59283 static inline void zero_user_segments(struct page *page,
59284 unsigned start1, unsigned end1,
59285 unsigned start2, unsigned end2)
59286 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59287 index 07d103a..04ec65b 100644
59288 --- a/include/linux/i2c.h
59289 +++ b/include/linux/i2c.h
59290 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59291 /* To determine what the adapter supports */
59292 u32 (*functionality) (struct i2c_adapter *);
59293 };
59294 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59295
59296 /*
59297 * i2c_adapter is the structure used to identify a physical i2c bus along
59298 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59299 index a6deef4..c56a7f2 100644
59300 --- a/include/linux/i2o.h
59301 +++ b/include/linux/i2o.h
59302 @@ -564,7 +564,7 @@ struct i2o_controller {
59303 struct i2o_device *exec; /* Executive */
59304 #if BITS_PER_LONG == 64
59305 spinlock_t context_list_lock; /* lock for context_list */
59306 - atomic_t context_list_counter; /* needed for unique contexts */
59307 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59308 struct list_head context_list; /* list of context id's
59309 and pointers */
59310 #endif
59311 diff --git a/include/linux/init.h b/include/linux/init.h
59312 index 9146f39..885354d 100644
59313 --- a/include/linux/init.h
59314 +++ b/include/linux/init.h
59315 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59316
59317 /* Each module must use one module_init(). */
59318 #define module_init(initfn) \
59319 - static inline initcall_t __inittest(void) \
59320 + static inline __used initcall_t __inittest(void) \
59321 { return initfn; } \
59322 int init_module(void) __attribute__((alias(#initfn)));
59323
59324 /* This is only required if you want to be unloadable. */
59325 #define module_exit(exitfn) \
59326 - static inline exitcall_t __exittest(void) \
59327 + static inline __used exitcall_t __exittest(void) \
59328 { return exitfn; } \
59329 void cleanup_module(void) __attribute__((alias(#exitfn)));
59330
59331 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59332 index 32574ee..00d4ef1 100644
59333 --- a/include/linux/init_task.h
59334 +++ b/include/linux/init_task.h
59335 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59336
59337 #define INIT_TASK_COMM "swapper"
59338
59339 +#ifdef CONFIG_X86
59340 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59341 +#else
59342 +#define INIT_TASK_THREAD_INFO
59343 +#endif
59344 +
59345 /*
59346 * INIT_TASK is used to set up the first task table, touch at
59347 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59348 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59349 RCU_INIT_POINTER(.cred, &init_cred), \
59350 .comm = INIT_TASK_COMM, \
59351 .thread = INIT_THREAD, \
59352 + INIT_TASK_THREAD_INFO \
59353 .fs = &init_fs, \
59354 .files = &init_files, \
59355 .signal = &init_signals, \
59356 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59357 index e6ca56d..8583707 100644
59358 --- a/include/linux/intel-iommu.h
59359 +++ b/include/linux/intel-iommu.h
59360 @@ -296,7 +296,7 @@ struct iommu_flush {
59361 u8 fm, u64 type);
59362 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59363 unsigned int size_order, u64 type);
59364 -};
59365 +} __no_const;
59366
59367 enum {
59368 SR_DMAR_FECTL_REG,
59369 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59370 index a64b00e..464d8bc 100644
59371 --- a/include/linux/interrupt.h
59372 +++ b/include/linux/interrupt.h
59373 @@ -441,7 +441,7 @@ enum
59374 /* map softirq index to softirq name. update 'softirq_to_name' in
59375 * kernel/softirq.c when adding a new softirq.
59376 */
59377 -extern char *softirq_to_name[NR_SOFTIRQS];
59378 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59379
59380 /* softirq mask and active fields moved to irq_cpustat_t in
59381 * asm/hardirq.h to get better cache usage. KAO
59382 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59383
59384 struct softirq_action
59385 {
59386 - void (*action)(struct softirq_action *);
59387 + void (*action)(void);
59388 };
59389
59390 asmlinkage void do_softirq(void);
59391 asmlinkage void __do_softirq(void);
59392 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59393 +extern void open_softirq(int nr, void (*action)(void));
59394 extern void softirq_init(void);
59395 static inline void __raise_softirq_irqoff(unsigned int nr)
59396 {
59397 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59398 index 3875719..4cd454c 100644
59399 --- a/include/linux/kallsyms.h
59400 +++ b/include/linux/kallsyms.h
59401 @@ -15,7 +15,8 @@
59402
59403 struct module;
59404
59405 -#ifdef CONFIG_KALLSYMS
59406 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59407 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59408 /* Lookup the address for a symbol. Returns 0 if not found. */
59409 unsigned long kallsyms_lookup_name(const char *name);
59410
59411 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59412 /* Stupid that this does nothing, but I didn't create this mess. */
59413 #define __print_symbol(fmt, addr)
59414 #endif /*CONFIG_KALLSYMS*/
59415 +#else /* when included by kallsyms.c, vsnprintf.c, or
59416 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59417 +extern void __print_symbol(const char *fmt, unsigned long address);
59418 +extern int sprint_backtrace(char *buffer, unsigned long address);
59419 +extern int sprint_symbol(char *buffer, unsigned long address);
59420 +const char *kallsyms_lookup(unsigned long addr,
59421 + unsigned long *symbolsize,
59422 + unsigned long *offset,
59423 + char **modname, char *namebuf);
59424 +#endif
59425
59426 /* This macro allows us to keep printk typechecking */
59427 static __printf(1, 2)
59428 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59429 index fa39183..40160be 100644
59430 --- a/include/linux/kgdb.h
59431 +++ b/include/linux/kgdb.h
59432 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59433 extern int kgdb_io_module_registered;
59434
59435 extern atomic_t kgdb_setting_breakpoint;
59436 -extern atomic_t kgdb_cpu_doing_single_step;
59437 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59438
59439 extern struct task_struct *kgdb_usethread;
59440 extern struct task_struct *kgdb_contthread;
59441 @@ -251,7 +251,7 @@ struct kgdb_arch {
59442 void (*disable_hw_break)(struct pt_regs *regs);
59443 void (*remove_all_hw_break)(void);
59444 void (*correct_hw_break)(void);
59445 -};
59446 +} __do_const;
59447
59448 /**
59449 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59450 @@ -276,7 +276,7 @@ struct kgdb_io {
59451 void (*pre_exception) (void);
59452 void (*post_exception) (void);
59453 int is_console;
59454 -};
59455 +} __do_const;
59456
59457 extern struct kgdb_arch arch_kgdb_ops;
59458
59459 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59460 index b16f653..eb908f4 100644
59461 --- a/include/linux/kmod.h
59462 +++ b/include/linux/kmod.h
59463 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59464 * usually useless though. */
59465 extern __printf(2, 3)
59466 int __request_module(bool wait, const char *name, ...);
59467 +extern __printf(3, 4)
59468 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59469 #define request_module(mod...) __request_module(true, mod)
59470 #define request_module_nowait(mod...) __request_module(false, mod)
59471 #define try_then_request_module(x, mod...) \
59472 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59473 index d526231..086e89b 100644
59474 --- a/include/linux/kvm_host.h
59475 +++ b/include/linux/kvm_host.h
59476 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59477 void vcpu_load(struct kvm_vcpu *vcpu);
59478 void vcpu_put(struct kvm_vcpu *vcpu);
59479
59480 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59481 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59482 struct module *module);
59483 void kvm_exit(void);
59484
59485 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59486 struct kvm_guest_debug *dbg);
59487 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59488
59489 -int kvm_arch_init(void *opaque);
59490 +int kvm_arch_init(const void *opaque);
59491 void kvm_arch_exit(void);
59492
59493 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59494 diff --git a/include/linux/libata.h b/include/linux/libata.h
59495 index cafc09a..d7e7829 100644
59496 --- a/include/linux/libata.h
59497 +++ b/include/linux/libata.h
59498 @@ -909,7 +909,7 @@ struct ata_port_operations {
59499 * fields must be pointers.
59500 */
59501 const struct ata_port_operations *inherits;
59502 -};
59503 +} __do_const;
59504
59505 struct ata_port_info {
59506 unsigned long flags;
59507 diff --git a/include/linux/mca.h b/include/linux/mca.h
59508 index 3797270..7765ede 100644
59509 --- a/include/linux/mca.h
59510 +++ b/include/linux/mca.h
59511 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59512 int region);
59513 void * (*mca_transform_memory)(struct mca_device *,
59514 void *memory);
59515 -};
59516 +} __no_const;
59517
59518 struct mca_bus {
59519 u64 default_dma_mask;
59520 diff --git a/include/linux/memory.h b/include/linux/memory.h
59521 index 935699b..11042cc 100644
59522 --- a/include/linux/memory.h
59523 +++ b/include/linux/memory.h
59524 @@ -144,7 +144,7 @@ struct memory_accessor {
59525 size_t count);
59526 ssize_t (*write)(struct memory_accessor *, const char *buf,
59527 off_t offset, size_t count);
59528 -};
59529 +} __no_const;
59530
59531 /*
59532 * Kernel text modification mutex, used for code patching. Users of this lock
59533 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59534 index 9970337..9444122 100644
59535 --- a/include/linux/mfd/abx500.h
59536 +++ b/include/linux/mfd/abx500.h
59537 @@ -188,6 +188,7 @@ struct abx500_ops {
59538 int (*event_registers_startup_state_get) (struct device *, u8 *);
59539 int (*startup_irq_enabled) (struct device *, unsigned int);
59540 };
59541 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59542
59543 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59544 void abx500_remove_ops(struct device *dev);
59545 diff --git a/include/linux/mm.h b/include/linux/mm.h
59546 index 4baadd1..2e0b45e 100644
59547 --- a/include/linux/mm.h
59548 +++ b/include/linux/mm.h
59549 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59550
59551 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59552 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59553 +
59554 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59555 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59556 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59557 +#else
59558 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59559 +#endif
59560 +
59561 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59562 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59563
59564 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59565 int set_page_dirty_lock(struct page *page);
59566 int clear_page_dirty_for_io(struct page *page);
59567
59568 -/* Is the vma a continuation of the stack vma above it? */
59569 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59570 -{
59571 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59572 -}
59573 -
59574 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59575 - unsigned long addr)
59576 -{
59577 - return (vma->vm_flags & VM_GROWSDOWN) &&
59578 - (vma->vm_start == addr) &&
59579 - !vma_growsdown(vma->vm_prev, addr);
59580 -}
59581 -
59582 -/* Is the vma a continuation of the stack vma below it? */
59583 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59584 -{
59585 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59586 -}
59587 -
59588 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59589 - unsigned long addr)
59590 -{
59591 - return (vma->vm_flags & VM_GROWSUP) &&
59592 - (vma->vm_end == addr) &&
59593 - !vma_growsup(vma->vm_next, addr);
59594 -}
59595 -
59596 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59597 unsigned long old_addr, struct vm_area_struct *new_vma,
59598 unsigned long new_addr, unsigned long len);
59599 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59600 }
59601 #endif
59602
59603 +#ifdef CONFIG_MMU
59604 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59605 +#else
59606 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59607 +{
59608 + return __pgprot(0);
59609 +}
59610 +#endif
59611 +
59612 int vma_wants_writenotify(struct vm_area_struct *vma);
59613
59614 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59615 @@ -1419,6 +1407,7 @@ out:
59616 }
59617
59618 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59619 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59620
59621 extern unsigned long do_brk(unsigned long, unsigned long);
59622
59623 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59624 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59625 struct vm_area_struct **pprev);
59626
59627 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59628 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59629 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59630 +
59631 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59632 NULL if none. Assume start_addr < end_addr. */
59633 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59634 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59635 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59636 }
59637
59638 -#ifdef CONFIG_MMU
59639 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59640 -#else
59641 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59642 -{
59643 - return __pgprot(0);
59644 -}
59645 -#endif
59646 -
59647 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59648 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59649 unsigned long pfn, unsigned long size, pgprot_t);
59650 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59651 extern int sysctl_memory_failure_early_kill;
59652 extern int sysctl_memory_failure_recovery;
59653 extern void shake_page(struct page *p, int access);
59654 -extern atomic_long_t mce_bad_pages;
59655 +extern atomic_long_unchecked_t mce_bad_pages;
59656 extern int soft_offline_page(struct page *page, int flags);
59657
59658 extern void dump_page(struct page *page);
59659 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59660 unsigned int pages_per_huge_page);
59661 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59662
59663 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59664 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59665 +#else
59666 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59667 +#endif
59668 +
59669 #endif /* __KERNEL__ */
59670 #endif /* _LINUX_MM_H */
59671 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59672 index 5b42f1b..759e4b4 100644
59673 --- a/include/linux/mm_types.h
59674 +++ b/include/linux/mm_types.h
59675 @@ -253,6 +253,8 @@ struct vm_area_struct {
59676 #ifdef CONFIG_NUMA
59677 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59678 #endif
59679 +
59680 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59681 };
59682
59683 struct core_thread {
59684 @@ -389,6 +391,24 @@ struct mm_struct {
59685 #ifdef CONFIG_CPUMASK_OFFSTACK
59686 struct cpumask cpumask_allocation;
59687 #endif
59688 +
59689 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59690 + unsigned long pax_flags;
59691 +#endif
59692 +
59693 +#ifdef CONFIG_PAX_DLRESOLVE
59694 + unsigned long call_dl_resolve;
59695 +#endif
59696 +
59697 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59698 + unsigned long call_syscall;
59699 +#endif
59700 +
59701 +#ifdef CONFIG_PAX_ASLR
59702 + unsigned long delta_mmap; /* randomized offset */
59703 + unsigned long delta_stack; /* randomized offset */
59704 +#endif
59705 +
59706 };
59707
59708 static inline void mm_init_cpumask(struct mm_struct *mm)
59709 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59710 index 1d1b1e1..2a13c78 100644
59711 --- a/include/linux/mmu_notifier.h
59712 +++ b/include/linux/mmu_notifier.h
59713 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59714 */
59715 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59716 ({ \
59717 - pte_t __pte; \
59718 + pte_t ___pte; \
59719 struct vm_area_struct *___vma = __vma; \
59720 unsigned long ___address = __address; \
59721 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59722 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59723 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59724 - __pte; \
59725 + ___pte; \
59726 })
59727
59728 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59729 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59730 index 188cb2f..d78409b 100644
59731 --- a/include/linux/mmzone.h
59732 +++ b/include/linux/mmzone.h
59733 @@ -369,7 +369,7 @@ struct zone {
59734 unsigned long flags; /* zone flags, see below */
59735
59736 /* Zone statistics */
59737 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59738 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59739
59740 /*
59741 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59742 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59743 index 468819c..17b9db3 100644
59744 --- a/include/linux/mod_devicetable.h
59745 +++ b/include/linux/mod_devicetable.h
59746 @@ -12,7 +12,7 @@
59747 typedef unsigned long kernel_ulong_t;
59748 #endif
59749
59750 -#define PCI_ANY_ID (~0)
59751 +#define PCI_ANY_ID ((__u16)~0)
59752
59753 struct pci_device_id {
59754 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59755 @@ -131,7 +131,7 @@ struct usb_device_id {
59756 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59757 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59758
59759 -#define HID_ANY_ID (~0)
59760 +#define HID_ANY_ID (~0U)
59761
59762 struct hid_device_id {
59763 __u16 bus;
59764 diff --git a/include/linux/module.h b/include/linux/module.h
59765 index 3cb7839..511cb87 100644
59766 --- a/include/linux/module.h
59767 +++ b/include/linux/module.h
59768 @@ -17,6 +17,7 @@
59769 #include <linux/moduleparam.h>
59770 #include <linux/tracepoint.h>
59771 #include <linux/export.h>
59772 +#include <linux/fs.h>
59773
59774 #include <linux/percpu.h>
59775 #include <asm/module.h>
59776 @@ -261,19 +262,16 @@ struct module
59777 int (*init)(void);
59778
59779 /* If this is non-NULL, vfree after init() returns */
59780 - void *module_init;
59781 + void *module_init_rx, *module_init_rw;
59782
59783 /* Here is the actual code + data, vfree'd on unload. */
59784 - void *module_core;
59785 + void *module_core_rx, *module_core_rw;
59786
59787 /* Here are the sizes of the init and core sections */
59788 - unsigned int init_size, core_size;
59789 + unsigned int init_size_rw, core_size_rw;
59790
59791 /* The size of the executable code in each section. */
59792 - unsigned int init_text_size, core_text_size;
59793 -
59794 - /* Size of RO sections of the module (text+rodata) */
59795 - unsigned int init_ro_size, core_ro_size;
59796 + unsigned int init_size_rx, core_size_rx;
59797
59798 /* Arch-specific module values */
59799 struct mod_arch_specific arch;
59800 @@ -329,6 +327,10 @@ struct module
59801 #ifdef CONFIG_EVENT_TRACING
59802 struct ftrace_event_call **trace_events;
59803 unsigned int num_trace_events;
59804 + struct file_operations trace_id;
59805 + struct file_operations trace_enable;
59806 + struct file_operations trace_format;
59807 + struct file_operations trace_filter;
59808 #endif
59809 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59810 unsigned int num_ftrace_callsites;
59811 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59812 bool is_module_percpu_address(unsigned long addr);
59813 bool is_module_text_address(unsigned long addr);
59814
59815 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59816 +{
59817 +
59818 +#ifdef CONFIG_PAX_KERNEXEC
59819 + if (ktla_ktva(addr) >= (unsigned long)start &&
59820 + ktla_ktva(addr) < (unsigned long)start + size)
59821 + return 1;
59822 +#endif
59823 +
59824 + return ((void *)addr >= start && (void *)addr < start + size);
59825 +}
59826 +
59827 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59828 +{
59829 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59830 +}
59831 +
59832 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59833 +{
59834 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59835 +}
59836 +
59837 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59838 +{
59839 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59840 +}
59841 +
59842 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59843 +{
59844 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59845 +}
59846 +
59847 static inline int within_module_core(unsigned long addr, struct module *mod)
59848 {
59849 - return (unsigned long)mod->module_core <= addr &&
59850 - addr < (unsigned long)mod->module_core + mod->core_size;
59851 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59852 }
59853
59854 static inline int within_module_init(unsigned long addr, struct module *mod)
59855 {
59856 - return (unsigned long)mod->module_init <= addr &&
59857 - addr < (unsigned long)mod->module_init + mod->init_size;
59858 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59859 }
59860
59861 /* Search for module by name: must hold module_mutex. */
59862 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
59863 index b2be02e..6a9fdb1 100644
59864 --- a/include/linux/moduleloader.h
59865 +++ b/include/linux/moduleloader.h
59866 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
59867 sections. Returns NULL on failure. */
59868 void *module_alloc(unsigned long size);
59869
59870 +#ifdef CONFIG_PAX_KERNEXEC
59871 +void *module_alloc_exec(unsigned long size);
59872 +#else
59873 +#define module_alloc_exec(x) module_alloc(x)
59874 +#endif
59875 +
59876 /* Free memory returned from module_alloc. */
59877 void module_free(struct module *mod, void *module_region);
59878
59879 +#ifdef CONFIG_PAX_KERNEXEC
59880 +void module_free_exec(struct module *mod, void *module_region);
59881 +#else
59882 +#define module_free_exec(x, y) module_free((x), (y))
59883 +#endif
59884 +
59885 /* Apply the given relocation to the (simplified) ELF. Return -error
59886 or 0. */
59887 int apply_relocate(Elf_Shdr *sechdrs,
59888 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
59889 index 7939f63..ec6df57 100644
59890 --- a/include/linux/moduleparam.h
59891 +++ b/include/linux/moduleparam.h
59892 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
59893 * @len is usually just sizeof(string).
59894 */
59895 #define module_param_string(name, string, len, perm) \
59896 - static const struct kparam_string __param_string_##name \
59897 + static const struct kparam_string __param_string_##name __used \
59898 = { len, string }; \
59899 __module_param_call(MODULE_PARAM_PREFIX, name, \
59900 &param_ops_string, \
59901 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
59902 * module_param_named() for why this might be necessary.
59903 */
59904 #define module_param_array_named(name, array, type, nump, perm) \
59905 - static const struct kparam_array __param_arr_##name \
59906 + static const struct kparam_array __param_arr_##name __used \
59907 = { .max = ARRAY_SIZE(array), .num = nump, \
59908 .ops = &param_ops_##type, \
59909 .elemsize = sizeof(array[0]), .elem = array }; \
59910 diff --git a/include/linux/namei.h b/include/linux/namei.h
59911 index ffc0213..2c1f2cb 100644
59912 --- a/include/linux/namei.h
59913 +++ b/include/linux/namei.h
59914 @@ -24,7 +24,7 @@ struct nameidata {
59915 unsigned seq;
59916 int last_type;
59917 unsigned depth;
59918 - char *saved_names[MAX_NESTED_LINKS + 1];
59919 + const char *saved_names[MAX_NESTED_LINKS + 1];
59920
59921 /* Intent data */
59922 union {
59923 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59924 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59925 extern void unlock_rename(struct dentry *, struct dentry *);
59926
59927 -static inline void nd_set_link(struct nameidata *nd, char *path)
59928 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59929 {
59930 nd->saved_names[nd->depth] = path;
59931 }
59932
59933 -static inline char *nd_get_link(struct nameidata *nd)
59934 +static inline const char *nd_get_link(const struct nameidata *nd)
59935 {
59936 return nd->saved_names[nd->depth];
59937 }
59938 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
59939 index a82ad4d..90d15b7 100644
59940 --- a/include/linux/netdevice.h
59941 +++ b/include/linux/netdevice.h
59942 @@ -949,6 +949,7 @@ struct net_device_ops {
59943 int (*ndo_set_features)(struct net_device *dev,
59944 u32 features);
59945 };
59946 +typedef struct net_device_ops __no_const net_device_ops_no_const;
59947
59948 /*
59949 * The DEVICE structure.
59950 @@ -1088,7 +1089,7 @@ struct net_device {
59951 int iflink;
59952
59953 struct net_device_stats stats;
59954 - atomic_long_t rx_dropped; /* dropped packets by core network
59955 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
59956 * Do not use this in drivers.
59957 */
59958
59959 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
59960 new file mode 100644
59961 index 0000000..33f4af8
59962 --- /dev/null
59963 +++ b/include/linux/netfilter/xt_gradm.h
59964 @@ -0,0 +1,9 @@
59965 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59966 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59967 +
59968 +struct xt_gradm_mtinfo {
59969 + __u16 flags;
59970 + __u16 invflags;
59971 +};
59972 +
59973 +#endif
59974 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
59975 index c65a18a..0c05f3a 100644
59976 --- a/include/linux/of_pdt.h
59977 +++ b/include/linux/of_pdt.h
59978 @@ -32,7 +32,7 @@ struct of_pdt_ops {
59979
59980 /* return 0 on success; fill in 'len' with number of bytes in path */
59981 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59982 -};
59983 +} __no_const;
59984
59985 extern void *prom_early_alloc(unsigned long size);
59986
59987 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
59988 index a4c5624..79d6d88 100644
59989 --- a/include/linux/oprofile.h
59990 +++ b/include/linux/oprofile.h
59991 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
59992 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59993 char const * name, ulong * val);
59994
59995 -/** Create a file for read-only access to an atomic_t. */
59996 +/** Create a file for read-only access to an atomic_unchecked_t. */
59997 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59998 - char const * name, atomic_t * val);
59999 + char const * name, atomic_unchecked_t * val);
60000
60001 /** create a directory */
60002 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60003 diff --git a/include/linux/padata.h b/include/linux/padata.h
60004 index 4633b2f..988bc08 100644
60005 --- a/include/linux/padata.h
60006 +++ b/include/linux/padata.h
60007 @@ -129,7 +129,7 @@ struct parallel_data {
60008 struct padata_instance *pinst;
60009 struct padata_parallel_queue __percpu *pqueue;
60010 struct padata_serial_queue __percpu *squeue;
60011 - atomic_t seq_nr;
60012 + atomic_unchecked_t seq_nr;
60013 atomic_t reorder_objects;
60014 atomic_t refcnt;
60015 unsigned int max_seq_nr;
60016 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60017 index b1f8912..c955bff 100644
60018 --- a/include/linux/perf_event.h
60019 +++ b/include/linux/perf_event.h
60020 @@ -748,8 +748,8 @@ struct perf_event {
60021
60022 enum perf_event_active_state state;
60023 unsigned int attach_state;
60024 - local64_t count;
60025 - atomic64_t child_count;
60026 + local64_t count; /* PaX: fix it one day */
60027 + atomic64_unchecked_t child_count;
60028
60029 /*
60030 * These are the total time in nanoseconds that the event
60031 @@ -800,8 +800,8 @@ struct perf_event {
60032 * These accumulate total time (in nanoseconds) that children
60033 * events have been enabled and running, respectively.
60034 */
60035 - atomic64_t child_total_time_enabled;
60036 - atomic64_t child_total_time_running;
60037 + atomic64_unchecked_t child_total_time_enabled;
60038 + atomic64_unchecked_t child_total_time_running;
60039
60040 /*
60041 * Protect attach/detach and child_list:
60042 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60043 index 77257c9..51d473a 100644
60044 --- a/include/linux/pipe_fs_i.h
60045 +++ b/include/linux/pipe_fs_i.h
60046 @@ -46,9 +46,9 @@ struct pipe_buffer {
60047 struct pipe_inode_info {
60048 wait_queue_head_t wait;
60049 unsigned int nrbufs, curbuf, buffers;
60050 - unsigned int readers;
60051 - unsigned int writers;
60052 - unsigned int waiting_writers;
60053 + atomic_t readers;
60054 + atomic_t writers;
60055 + atomic_t waiting_writers;
60056 unsigned int r_counter;
60057 unsigned int w_counter;
60058 struct page *tmp_page;
60059 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60060 index d3085e7..fd01052 100644
60061 --- a/include/linux/pm_runtime.h
60062 +++ b/include/linux/pm_runtime.h
60063 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60064
60065 static inline void pm_runtime_mark_last_busy(struct device *dev)
60066 {
60067 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60068 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60069 }
60070
60071 #else /* !CONFIG_PM_RUNTIME */
60072 diff --git a/include/linux/poison.h b/include/linux/poison.h
60073 index 79159de..f1233a9 100644
60074 --- a/include/linux/poison.h
60075 +++ b/include/linux/poison.h
60076 @@ -19,8 +19,8 @@
60077 * under normal circumstances, used to verify that nobody uses
60078 * non-initialized list entries.
60079 */
60080 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60081 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60082 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60083 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60084
60085 /********** include/linux/timer.h **********/
60086 /*
60087 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60088 index 58969b2..ead129b 100644
60089 --- a/include/linux/preempt.h
60090 +++ b/include/linux/preempt.h
60091 @@ -123,7 +123,7 @@ struct preempt_ops {
60092 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60093 void (*sched_out)(struct preempt_notifier *notifier,
60094 struct task_struct *next);
60095 -};
60096 +} __no_const;
60097
60098 /**
60099 * preempt_notifier - key for installing preemption notifiers
60100 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60101 index 643b96c..ef55a9c 100644
60102 --- a/include/linux/proc_fs.h
60103 +++ b/include/linux/proc_fs.h
60104 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60105 return proc_create_data(name, mode, parent, proc_fops, NULL);
60106 }
60107
60108 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60109 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60110 +{
60111 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60112 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60113 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60114 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60115 +#else
60116 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60117 +#endif
60118 +}
60119 +
60120 +
60121 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60122 mode_t mode, struct proc_dir_entry *base,
60123 read_proc_t *read_proc, void * data)
60124 @@ -258,7 +271,7 @@ union proc_op {
60125 int (*proc_show)(struct seq_file *m,
60126 struct pid_namespace *ns, struct pid *pid,
60127 struct task_struct *task);
60128 -};
60129 +} __no_const;
60130
60131 struct ctl_table_header;
60132 struct ctl_table;
60133 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60134 index 800f113..e9ee2e3 100644
60135 --- a/include/linux/ptrace.h
60136 +++ b/include/linux/ptrace.h
60137 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60138 extern void exit_ptrace(struct task_struct *tracer);
60139 #define PTRACE_MODE_READ 1
60140 #define PTRACE_MODE_ATTACH 2
60141 -/* Returns 0 on success, -errno on denial. */
60142 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60143 /* Returns true on success, false on denial. */
60144 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60145 +/* Returns true on success, false on denial. */
60146 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60147 +/* Returns true on success, false on denial. */
60148 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60149
60150 static inline int ptrace_reparented(struct task_struct *child)
60151 {
60152 diff --git a/include/linux/random.h b/include/linux/random.h
60153 index 8f74538..02a1012 100644
60154 --- a/include/linux/random.h
60155 +++ b/include/linux/random.h
60156 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60157
60158 u32 prandom32(struct rnd_state *);
60159
60160 +static inline unsigned long pax_get_random_long(void)
60161 +{
60162 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60163 +}
60164 +
60165 /*
60166 * Handle minimum values for seeds
60167 */
60168 static inline u32 __seed(u32 x, u32 m)
60169 {
60170 - return (x < m) ? x + m : x;
60171 + return (x <= m) ? x + m + 1 : x;
60172 }
60173
60174 /**
60175 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60176 index e0879a7..a12f962 100644
60177 --- a/include/linux/reboot.h
60178 +++ b/include/linux/reboot.h
60179 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60180 * Architecture-specific implementations of sys_reboot commands.
60181 */
60182
60183 -extern void machine_restart(char *cmd);
60184 -extern void machine_halt(void);
60185 -extern void machine_power_off(void);
60186 +extern void machine_restart(char *cmd) __noreturn;
60187 +extern void machine_halt(void) __noreturn;
60188 +extern void machine_power_off(void) __noreturn;
60189
60190 extern void machine_shutdown(void);
60191 struct pt_regs;
60192 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60193 */
60194
60195 extern void kernel_restart_prepare(char *cmd);
60196 -extern void kernel_restart(char *cmd);
60197 -extern void kernel_halt(void);
60198 -extern void kernel_power_off(void);
60199 +extern void kernel_restart(char *cmd) __noreturn;
60200 +extern void kernel_halt(void) __noreturn;
60201 +extern void kernel_power_off(void) __noreturn;
60202
60203 extern int C_A_D; /* for sysctl */
60204 void ctrl_alt_del(void);
60205 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60206 * Emergency restart, callable from an interrupt handler.
60207 */
60208
60209 -extern void emergency_restart(void);
60210 +extern void emergency_restart(void) __noreturn;
60211 #include <asm/emergency-restart.h>
60212
60213 #endif
60214 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60215 index 96d465f..b084e05 100644
60216 --- a/include/linux/reiserfs_fs.h
60217 +++ b/include/linux/reiserfs_fs.h
60218 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60219 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60220
60221 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60222 -#define get_generation(s) atomic_read (&fs_generation(s))
60223 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60224 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60225 #define __fs_changed(gen,s) (gen != get_generation (s))
60226 #define fs_changed(gen,s) \
60227 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60228 index 52c83b6..18ed7eb 100644
60229 --- a/include/linux/reiserfs_fs_sb.h
60230 +++ b/include/linux/reiserfs_fs_sb.h
60231 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60232 /* Comment? -Hans */
60233 wait_queue_head_t s_wait;
60234 /* To be obsoleted soon by per buffer seals.. -Hans */
60235 - atomic_t s_generation_counter; // increased by one every time the
60236 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60237 // tree gets re-balanced
60238 unsigned long s_properties; /* File system properties. Currently holds
60239 on-disk FS format */
60240 diff --git a/include/linux/relay.h b/include/linux/relay.h
60241 index 14a86bc..17d0700 100644
60242 --- a/include/linux/relay.h
60243 +++ b/include/linux/relay.h
60244 @@ -159,7 +159,7 @@ struct rchan_callbacks
60245 * The callback should return 0 if successful, negative if not.
60246 */
60247 int (*remove_buf_file)(struct dentry *dentry);
60248 -};
60249 +} __no_const;
60250
60251 /*
60252 * CONFIG_RELAY kernel API, kernel/relay.c
60253 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60254 index c6c6084..5bf1212 100644
60255 --- a/include/linux/rfkill.h
60256 +++ b/include/linux/rfkill.h
60257 @@ -147,6 +147,7 @@ struct rfkill_ops {
60258 void (*query)(struct rfkill *rfkill, void *data);
60259 int (*set_block)(void *data, bool blocked);
60260 };
60261 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60262
60263 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60264 /**
60265 diff --git a/include/linux/rio.h b/include/linux/rio.h
60266 index 4d50611..c6858a2 100644
60267 --- a/include/linux/rio.h
60268 +++ b/include/linux/rio.h
60269 @@ -315,7 +315,7 @@ struct rio_ops {
60270 int mbox, void *buffer, size_t len);
60271 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60272 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60273 -};
60274 +} __no_const;
60275
60276 #define RIO_RESOURCE_MEM 0x00000100
60277 #define RIO_RESOURCE_DOORBELL 0x00000200
60278 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60279 index 2148b12..519b820 100644
60280 --- a/include/linux/rmap.h
60281 +++ b/include/linux/rmap.h
60282 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60283 void anon_vma_init(void); /* create anon_vma_cachep */
60284 int anon_vma_prepare(struct vm_area_struct *);
60285 void unlink_anon_vmas(struct vm_area_struct *);
60286 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60287 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60288 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60289 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60290 void __anon_vma_link(struct vm_area_struct *);
60291
60292 static inline void anon_vma_merge(struct vm_area_struct *vma,
60293 diff --git a/include/linux/sched.h b/include/linux/sched.h
60294 index 1c4f3e9..f29cbeb 100644
60295 --- a/include/linux/sched.h
60296 +++ b/include/linux/sched.h
60297 @@ -101,6 +101,7 @@ struct bio_list;
60298 struct fs_struct;
60299 struct perf_event_context;
60300 struct blk_plug;
60301 +struct linux_binprm;
60302
60303 /*
60304 * List of flags we want to share for kernel threads,
60305 @@ -380,10 +381,13 @@ struct user_namespace;
60306 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60307
60308 extern int sysctl_max_map_count;
60309 +extern unsigned long sysctl_heap_stack_gap;
60310
60311 #include <linux/aio.h>
60312
60313 #ifdef CONFIG_MMU
60314 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60315 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60316 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60317 extern unsigned long
60318 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60319 @@ -629,6 +633,17 @@ struct signal_struct {
60320 #ifdef CONFIG_TASKSTATS
60321 struct taskstats *stats;
60322 #endif
60323 +
60324 +#ifdef CONFIG_GRKERNSEC
60325 + u32 curr_ip;
60326 + u32 saved_ip;
60327 + u32 gr_saddr;
60328 + u32 gr_daddr;
60329 + u16 gr_sport;
60330 + u16 gr_dport;
60331 + u8 used_accept:1;
60332 +#endif
60333 +
60334 #ifdef CONFIG_AUDIT
60335 unsigned audit_tty;
60336 struct tty_audit_buf *tty_audit_buf;
60337 @@ -710,6 +725,11 @@ struct user_struct {
60338 struct key *session_keyring; /* UID's default session keyring */
60339 #endif
60340
60341 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60342 + unsigned int banned;
60343 + unsigned long ban_expires;
60344 +#endif
60345 +
60346 /* Hash table maintenance information */
60347 struct hlist_node uidhash_node;
60348 uid_t uid;
60349 @@ -1337,8 +1357,8 @@ struct task_struct {
60350 struct list_head thread_group;
60351
60352 struct completion *vfork_done; /* for vfork() */
60353 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60354 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60355 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60356 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60357
60358 cputime_t utime, stime, utimescaled, stimescaled;
60359 cputime_t gtime;
60360 @@ -1354,13 +1374,6 @@ struct task_struct {
60361 struct task_cputime cputime_expires;
60362 struct list_head cpu_timers[3];
60363
60364 -/* process credentials */
60365 - const struct cred __rcu *real_cred; /* objective and real subjective task
60366 - * credentials (COW) */
60367 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60368 - * credentials (COW) */
60369 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60370 -
60371 char comm[TASK_COMM_LEN]; /* executable name excluding path
60372 - access with [gs]et_task_comm (which lock
60373 it with task_lock())
60374 @@ -1377,8 +1390,16 @@ struct task_struct {
60375 #endif
60376 /* CPU-specific state of this task */
60377 struct thread_struct thread;
60378 +/* thread_info moved to task_struct */
60379 +#ifdef CONFIG_X86
60380 + struct thread_info tinfo;
60381 +#endif
60382 /* filesystem information */
60383 struct fs_struct *fs;
60384 +
60385 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60386 + * credentials (COW) */
60387 +
60388 /* open file information */
60389 struct files_struct *files;
60390 /* namespaces */
60391 @@ -1425,6 +1446,11 @@ struct task_struct {
60392 struct rt_mutex_waiter *pi_blocked_on;
60393 #endif
60394
60395 +/* process credentials */
60396 + const struct cred __rcu *real_cred; /* objective and real subjective task
60397 + * credentials (COW) */
60398 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60399 +
60400 #ifdef CONFIG_DEBUG_MUTEXES
60401 /* mutex deadlock detection */
60402 struct mutex_waiter *blocked_on;
60403 @@ -1540,6 +1566,24 @@ struct task_struct {
60404 unsigned long default_timer_slack_ns;
60405
60406 struct list_head *scm_work_list;
60407 +
60408 +#ifdef CONFIG_GRKERNSEC
60409 + /* grsecurity */
60410 +#ifdef CONFIG_GRKERNSEC_SETXID
60411 + const struct cred *delayed_cred;
60412 +#endif
60413 + struct dentry *gr_chroot_dentry;
60414 + struct acl_subject_label *acl;
60415 + struct acl_role_label *role;
60416 + struct file *exec_file;
60417 + u16 acl_role_id;
60418 + /* is this the task that authenticated to the special role */
60419 + u8 acl_sp_role;
60420 + u8 is_writable;
60421 + u8 brute;
60422 + u8 gr_is_chrooted;
60423 +#endif
60424 +
60425 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60426 /* Index of current stored address in ret_stack */
60427 int curr_ret_stack;
60428 @@ -1574,6 +1618,51 @@ struct task_struct {
60429 #endif
60430 };
60431
60432 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60433 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60434 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60435 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60436 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60437 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60438 +
60439 +#ifdef CONFIG_PAX_SOFTMODE
60440 +extern int pax_softmode;
60441 +#endif
60442 +
60443 +extern int pax_check_flags(unsigned long *);
60444 +
60445 +/* if tsk != current then task_lock must be held on it */
60446 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60447 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60448 +{
60449 + if (likely(tsk->mm))
60450 + return tsk->mm->pax_flags;
60451 + else
60452 + return 0UL;
60453 +}
60454 +
60455 +/* if tsk != current then task_lock must be held on it */
60456 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60457 +{
60458 + if (likely(tsk->mm)) {
60459 + tsk->mm->pax_flags = flags;
60460 + return 0;
60461 + }
60462 + return -EINVAL;
60463 +}
60464 +#endif
60465 +
60466 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60467 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60468 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60469 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60470 +#endif
60471 +
60472 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60473 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60474 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60475 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60476 +
60477 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60478 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60479
60480 @@ -2081,7 +2170,9 @@ void yield(void);
60481 extern struct exec_domain default_exec_domain;
60482
60483 union thread_union {
60484 +#ifndef CONFIG_X86
60485 struct thread_info thread_info;
60486 +#endif
60487 unsigned long stack[THREAD_SIZE/sizeof(long)];
60488 };
60489
60490 @@ -2114,6 +2205,7 @@ extern struct pid_namespace init_pid_ns;
60491 */
60492
60493 extern struct task_struct *find_task_by_vpid(pid_t nr);
60494 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60495 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60496 struct pid_namespace *ns);
60497
60498 @@ -2235,6 +2327,12 @@ static inline void mmdrop(struct mm_struct * mm)
60499 extern void mmput(struct mm_struct *);
60500 /* Grab a reference to a task's mm, if it is not already going away */
60501 extern struct mm_struct *get_task_mm(struct task_struct *task);
60502 +/*
60503 + * Grab a reference to a task's mm, if it is not already going away
60504 + * and ptrace_may_access with the mode parameter passed to it
60505 + * succeeds.
60506 + */
60507 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60508 /* Remove the current tasks stale references to the old mm_struct */
60509 extern void mm_release(struct task_struct *, struct mm_struct *);
60510 /* Allocate a new mm structure and copy contents from tsk->mm */
60511 @@ -2251,7 +2349,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60512 extern void exit_itimers(struct signal_struct *);
60513 extern void flush_itimer_signals(void);
60514
60515 -extern NORET_TYPE void do_group_exit(int);
60516 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60517
60518 extern void daemonize(const char *, ...);
60519 extern int allow_signal(int);
60520 @@ -2416,13 +2514,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60521
60522 #endif
60523
60524 -static inline int object_is_on_stack(void *obj)
60525 +static inline int object_starts_on_stack(void *obj)
60526 {
60527 - void *stack = task_stack_page(current);
60528 + const void *stack = task_stack_page(current);
60529
60530 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60531 }
60532
60533 +#ifdef CONFIG_PAX_USERCOPY
60534 +extern int object_is_on_stack(const void *obj, unsigned long len);
60535 +#endif
60536 +
60537 extern void thread_info_cache_init(void);
60538
60539 #ifdef CONFIG_DEBUG_STACK_USAGE
60540 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60541 index 899fbb4..1cb4138 100644
60542 --- a/include/linux/screen_info.h
60543 +++ b/include/linux/screen_info.h
60544 @@ -43,7 +43,8 @@ struct screen_info {
60545 __u16 pages; /* 0x32 */
60546 __u16 vesa_attributes; /* 0x34 */
60547 __u32 capabilities; /* 0x36 */
60548 - __u8 _reserved[6]; /* 0x3a */
60549 + __u16 vesapm_size; /* 0x3a */
60550 + __u8 _reserved[4]; /* 0x3c */
60551 } __attribute__((packed));
60552
60553 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60554 diff --git a/include/linux/security.h b/include/linux/security.h
60555 index e8c619d..e0cbd1c 100644
60556 --- a/include/linux/security.h
60557 +++ b/include/linux/security.h
60558 @@ -37,6 +37,7 @@
60559 #include <linux/xfrm.h>
60560 #include <linux/slab.h>
60561 #include <linux/xattr.h>
60562 +#include <linux/grsecurity.h>
60563 #include <net/flow.h>
60564
60565 /* Maximum number of letters for an LSM name string */
60566 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60567 index 0b69a46..e9e5538 100644
60568 --- a/include/linux/seq_file.h
60569 +++ b/include/linux/seq_file.h
60570 @@ -33,6 +33,7 @@ struct seq_operations {
60571 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60572 int (*show) (struct seq_file *m, void *v);
60573 };
60574 +typedef struct seq_operations __no_const seq_operations_no_const;
60575
60576 #define SEQ_SKIP 1
60577
60578 diff --git a/include/linux/shm.h b/include/linux/shm.h
60579 index 92808b8..c28cac4 100644
60580 --- a/include/linux/shm.h
60581 +++ b/include/linux/shm.h
60582 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60583
60584 /* The task created the shm object. NULL if the task is dead. */
60585 struct task_struct *shm_creator;
60586 +#ifdef CONFIG_GRKERNSEC
60587 + time_t shm_createtime;
60588 + pid_t shm_lapid;
60589 +#endif
60590 };
60591
60592 /* shm_mode upper byte flags */
60593 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60594 index fe86488..1563c1c 100644
60595 --- a/include/linux/skbuff.h
60596 +++ b/include/linux/skbuff.h
60597 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60598 */
60599 static inline int skb_queue_empty(const struct sk_buff_head *list)
60600 {
60601 - return list->next == (struct sk_buff *)list;
60602 + return list->next == (const struct sk_buff *)list;
60603 }
60604
60605 /**
60606 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60607 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60608 const struct sk_buff *skb)
60609 {
60610 - return skb->next == (struct sk_buff *)list;
60611 + return skb->next == (const struct sk_buff *)list;
60612 }
60613
60614 /**
60615 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60616 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60617 const struct sk_buff *skb)
60618 {
60619 - return skb->prev == (struct sk_buff *)list;
60620 + return skb->prev == (const struct sk_buff *)list;
60621 }
60622
60623 /**
60624 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60625 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60626 */
60627 #ifndef NET_SKB_PAD
60628 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60629 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60630 #endif
60631
60632 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60633 diff --git a/include/linux/slab.h b/include/linux/slab.h
60634 index 573c809..e84c132 100644
60635 --- a/include/linux/slab.h
60636 +++ b/include/linux/slab.h
60637 @@ -11,12 +11,20 @@
60638
60639 #include <linux/gfp.h>
60640 #include <linux/types.h>
60641 +#include <linux/err.h>
60642
60643 /*
60644 * Flags to pass to kmem_cache_create().
60645 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60646 */
60647 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60648 +
60649 +#ifdef CONFIG_PAX_USERCOPY
60650 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60651 +#else
60652 +#define SLAB_USERCOPY 0x00000000UL
60653 +#endif
60654 +
60655 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60656 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60657 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60658 @@ -87,10 +95,13 @@
60659 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60660 * Both make kfree a no-op.
60661 */
60662 -#define ZERO_SIZE_PTR ((void *)16)
60663 +#define ZERO_SIZE_PTR \
60664 +({ \
60665 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60666 + (void *)(-MAX_ERRNO-1L); \
60667 +})
60668
60669 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60670 - (unsigned long)ZERO_SIZE_PTR)
60671 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60672
60673 /*
60674 * struct kmem_cache related prototypes
60675 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60676 void kfree(const void *);
60677 void kzfree(const void *);
60678 size_t ksize(const void *);
60679 +void check_object_size(const void *ptr, unsigned long n, bool to);
60680
60681 /*
60682 * Allocator specific definitions. These are mainly used to establish optimized
60683 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60684
60685 void __init kmem_cache_init_late(void);
60686
60687 +#define kmalloc(x, y) \
60688 +({ \
60689 + void *___retval; \
60690 + intoverflow_t ___x = (intoverflow_t)x; \
60691 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60692 + ___retval = NULL; \
60693 + else \
60694 + ___retval = kmalloc((size_t)___x, (y)); \
60695 + ___retval; \
60696 +})
60697 +
60698 +#define kmalloc_node(x, y, z) \
60699 +({ \
60700 + void *___retval; \
60701 + intoverflow_t ___x = (intoverflow_t)x; \
60702 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60703 + ___retval = NULL; \
60704 + else \
60705 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60706 + ___retval; \
60707 +})
60708 +
60709 +#define kzalloc(x, y) \
60710 +({ \
60711 + void *___retval; \
60712 + intoverflow_t ___x = (intoverflow_t)x; \
60713 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60714 + ___retval = NULL; \
60715 + else \
60716 + ___retval = kzalloc((size_t)___x, (y)); \
60717 + ___retval; \
60718 +})
60719 +
60720 +#define __krealloc(x, y, z) \
60721 +({ \
60722 + void *___retval; \
60723 + intoverflow_t ___y = (intoverflow_t)y; \
60724 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60725 + ___retval = NULL; \
60726 + else \
60727 + ___retval = __krealloc((x), (size_t)___y, (z)); \
60728 + ___retval; \
60729 +})
60730 +
60731 +#define krealloc(x, y, z) \
60732 +({ \
60733 + void *___retval; \
60734 + intoverflow_t ___y = (intoverflow_t)y; \
60735 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60736 + ___retval = NULL; \
60737 + else \
60738 + ___retval = krealloc((x), (size_t)___y, (z)); \
60739 + ___retval; \
60740 +})
60741 +
60742 #endif /* _LINUX_SLAB_H */
60743 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60744 index d00e0ba..1b3bf7b 100644
60745 --- a/include/linux/slab_def.h
60746 +++ b/include/linux/slab_def.h
60747 @@ -68,10 +68,10 @@ struct kmem_cache {
60748 unsigned long node_allocs;
60749 unsigned long node_frees;
60750 unsigned long node_overflow;
60751 - atomic_t allochit;
60752 - atomic_t allocmiss;
60753 - atomic_t freehit;
60754 - atomic_t freemiss;
60755 + atomic_unchecked_t allochit;
60756 + atomic_unchecked_t allocmiss;
60757 + atomic_unchecked_t freehit;
60758 + atomic_unchecked_t freemiss;
60759
60760 /*
60761 * If debugging is enabled, then the allocator can add additional
60762 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60763 index a32bcfd..53b71f4 100644
60764 --- a/include/linux/slub_def.h
60765 +++ b/include/linux/slub_def.h
60766 @@ -89,7 +89,7 @@ struct kmem_cache {
60767 struct kmem_cache_order_objects max;
60768 struct kmem_cache_order_objects min;
60769 gfp_t allocflags; /* gfp flags to use on each alloc */
60770 - int refcount; /* Refcount for slab cache destroy */
60771 + atomic_t refcount; /* Refcount for slab cache destroy */
60772 void (*ctor)(void *);
60773 int inuse; /* Offset to metadata */
60774 int align; /* Alignment */
60775 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60776 }
60777
60778 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60779 -void *__kmalloc(size_t size, gfp_t flags);
60780 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60781
60782 static __always_inline void *
60783 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60784 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60785 index de8832d..0147b46 100644
60786 --- a/include/linux/sonet.h
60787 +++ b/include/linux/sonet.h
60788 @@ -61,7 +61,7 @@ struct sonet_stats {
60789 #include <linux/atomic.h>
60790
60791 struct k_sonet_stats {
60792 -#define __HANDLE_ITEM(i) atomic_t i
60793 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60794 __SONET_ITEMS
60795 #undef __HANDLE_ITEM
60796 };
60797 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60798 index 3d8f9c4..69f1c0a 100644
60799 --- a/include/linux/sunrpc/clnt.h
60800 +++ b/include/linux/sunrpc/clnt.h
60801 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60802 {
60803 switch (sap->sa_family) {
60804 case AF_INET:
60805 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60806 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60807 case AF_INET6:
60808 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60809 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60810 }
60811 return 0;
60812 }
60813 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60814 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60815 const struct sockaddr *src)
60816 {
60817 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60818 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60819 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60820
60821 dsin->sin_family = ssin->sin_family;
60822 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60823 if (sa->sa_family != AF_INET6)
60824 return 0;
60825
60826 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60827 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60828 }
60829
60830 #endif /* __KERNEL__ */
60831 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60832 index e775689..9e206d9 100644
60833 --- a/include/linux/sunrpc/sched.h
60834 +++ b/include/linux/sunrpc/sched.h
60835 @@ -105,6 +105,7 @@ struct rpc_call_ops {
60836 void (*rpc_call_done)(struct rpc_task *, void *);
60837 void (*rpc_release)(void *);
60838 };
60839 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60840
60841 struct rpc_task_setup {
60842 struct rpc_task *task;
60843 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
60844 index c14fe86..393245e 100644
60845 --- a/include/linux/sunrpc/svc_rdma.h
60846 +++ b/include/linux/sunrpc/svc_rdma.h
60847 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60848 extern unsigned int svcrdma_max_requests;
60849 extern unsigned int svcrdma_max_req_size;
60850
60851 -extern atomic_t rdma_stat_recv;
60852 -extern atomic_t rdma_stat_read;
60853 -extern atomic_t rdma_stat_write;
60854 -extern atomic_t rdma_stat_sq_starve;
60855 -extern atomic_t rdma_stat_rq_starve;
60856 -extern atomic_t rdma_stat_rq_poll;
60857 -extern atomic_t rdma_stat_rq_prod;
60858 -extern atomic_t rdma_stat_sq_poll;
60859 -extern atomic_t rdma_stat_sq_prod;
60860 +extern atomic_unchecked_t rdma_stat_recv;
60861 +extern atomic_unchecked_t rdma_stat_read;
60862 +extern atomic_unchecked_t rdma_stat_write;
60863 +extern atomic_unchecked_t rdma_stat_sq_starve;
60864 +extern atomic_unchecked_t rdma_stat_rq_starve;
60865 +extern atomic_unchecked_t rdma_stat_rq_poll;
60866 +extern atomic_unchecked_t rdma_stat_rq_prod;
60867 +extern atomic_unchecked_t rdma_stat_sq_poll;
60868 +extern atomic_unchecked_t rdma_stat_sq_prod;
60869
60870 #define RPCRDMA_VERSION 1
60871
60872 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
60873 index 703cfa3..0b8ca72ac 100644
60874 --- a/include/linux/sysctl.h
60875 +++ b/include/linux/sysctl.h
60876 @@ -155,7 +155,11 @@ enum
60877 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60878 };
60879
60880 -
60881 +#ifdef CONFIG_PAX_SOFTMODE
60882 +enum {
60883 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60884 +};
60885 +#endif
60886
60887 /* CTL_VM names: */
60888 enum
60889 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
60890
60891 extern int proc_dostring(struct ctl_table *, int,
60892 void __user *, size_t *, loff_t *);
60893 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60894 + void __user *, size_t *, loff_t *);
60895 extern int proc_dointvec(struct ctl_table *, int,
60896 void __user *, size_t *, loff_t *);
60897 extern int proc_dointvec_minmax(struct ctl_table *, int,
60898 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
60899 index ff7dc08..893e1bd 100644
60900 --- a/include/linux/tty_ldisc.h
60901 +++ b/include/linux/tty_ldisc.h
60902 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60903
60904 struct module *owner;
60905
60906 - int refcount;
60907 + atomic_t refcount;
60908 };
60909
60910 struct tty_ldisc {
60911 diff --git a/include/linux/types.h b/include/linux/types.h
60912 index 57a9723..dbe234a 100644
60913 --- a/include/linux/types.h
60914 +++ b/include/linux/types.h
60915 @@ -213,10 +213,26 @@ typedef struct {
60916 int counter;
60917 } atomic_t;
60918
60919 +#ifdef CONFIG_PAX_REFCOUNT
60920 +typedef struct {
60921 + int counter;
60922 +} atomic_unchecked_t;
60923 +#else
60924 +typedef atomic_t atomic_unchecked_t;
60925 +#endif
60926 +
60927 #ifdef CONFIG_64BIT
60928 typedef struct {
60929 long counter;
60930 } atomic64_t;
60931 +
60932 +#ifdef CONFIG_PAX_REFCOUNT
60933 +typedef struct {
60934 + long counter;
60935 +} atomic64_unchecked_t;
60936 +#else
60937 +typedef atomic64_t atomic64_unchecked_t;
60938 +#endif
60939 #endif
60940
60941 struct list_head {
60942 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
60943 index 5ca0951..ab496a5 100644
60944 --- a/include/linux/uaccess.h
60945 +++ b/include/linux/uaccess.h
60946 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
60947 long ret; \
60948 mm_segment_t old_fs = get_fs(); \
60949 \
60950 - set_fs(KERNEL_DS); \
60951 pagefault_disable(); \
60952 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60953 - pagefault_enable(); \
60954 + set_fs(KERNEL_DS); \
60955 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60956 set_fs(old_fs); \
60957 + pagefault_enable(); \
60958 ret; \
60959 })
60960
60961 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
60962 index 99c1b4d..bb94261 100644
60963 --- a/include/linux/unaligned/access_ok.h
60964 +++ b/include/linux/unaligned/access_ok.h
60965 @@ -6,32 +6,32 @@
60966
60967 static inline u16 get_unaligned_le16(const void *p)
60968 {
60969 - return le16_to_cpup((__le16 *)p);
60970 + return le16_to_cpup((const __le16 *)p);
60971 }
60972
60973 static inline u32 get_unaligned_le32(const void *p)
60974 {
60975 - return le32_to_cpup((__le32 *)p);
60976 + return le32_to_cpup((const __le32 *)p);
60977 }
60978
60979 static inline u64 get_unaligned_le64(const void *p)
60980 {
60981 - return le64_to_cpup((__le64 *)p);
60982 + return le64_to_cpup((const __le64 *)p);
60983 }
60984
60985 static inline u16 get_unaligned_be16(const void *p)
60986 {
60987 - return be16_to_cpup((__be16 *)p);
60988 + return be16_to_cpup((const __be16 *)p);
60989 }
60990
60991 static inline u32 get_unaligned_be32(const void *p)
60992 {
60993 - return be32_to_cpup((__be32 *)p);
60994 + return be32_to_cpup((const __be32 *)p);
60995 }
60996
60997 static inline u64 get_unaligned_be64(const void *p)
60998 {
60999 - return be64_to_cpup((__be64 *)p);
61000 + return be64_to_cpup((const __be64 *)p);
61001 }
61002
61003 static inline void put_unaligned_le16(u16 val, void *p)
61004 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61005 index e5a40c3..20ab0f6 100644
61006 --- a/include/linux/usb/renesas_usbhs.h
61007 +++ b/include/linux/usb/renesas_usbhs.h
61008 @@ -39,7 +39,7 @@ enum {
61009 */
61010 struct renesas_usbhs_driver_callback {
61011 int (*notify_hotplug)(struct platform_device *pdev);
61012 -};
61013 +} __no_const;
61014
61015 /*
61016 * callback functions for platform
61017 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61018 * VBUS control is needed for Host
61019 */
61020 int (*set_vbus)(struct platform_device *pdev, int enable);
61021 -};
61022 +} __no_const;
61023
61024 /*
61025 * parameters for renesas usbhs
61026 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61027 index 6f8fbcf..8259001 100644
61028 --- a/include/linux/vermagic.h
61029 +++ b/include/linux/vermagic.h
61030 @@ -25,9 +25,35 @@
61031 #define MODULE_ARCH_VERMAGIC ""
61032 #endif
61033
61034 +#ifdef CONFIG_PAX_REFCOUNT
61035 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61036 +#else
61037 +#define MODULE_PAX_REFCOUNT ""
61038 +#endif
61039 +
61040 +#ifdef CONSTIFY_PLUGIN
61041 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61042 +#else
61043 +#define MODULE_CONSTIFY_PLUGIN ""
61044 +#endif
61045 +
61046 +#ifdef STACKLEAK_PLUGIN
61047 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61048 +#else
61049 +#define MODULE_STACKLEAK_PLUGIN ""
61050 +#endif
61051 +
61052 +#ifdef CONFIG_GRKERNSEC
61053 +#define MODULE_GRSEC "GRSEC "
61054 +#else
61055 +#define MODULE_GRSEC ""
61056 +#endif
61057 +
61058 #define VERMAGIC_STRING \
61059 UTS_RELEASE " " \
61060 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61061 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61062 - MODULE_ARCH_VERMAGIC
61063 + MODULE_ARCH_VERMAGIC \
61064 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61065 + MODULE_GRSEC
61066
61067 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61068 index 4bde182..aec92c1 100644
61069 --- a/include/linux/vmalloc.h
61070 +++ b/include/linux/vmalloc.h
61071 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61072 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61073 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61074 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61075 +
61076 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61077 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61078 +#endif
61079 +
61080 /* bits [20..32] reserved for arch specific ioremap internals */
61081
61082 /*
61083 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61084 # endif
61085 #endif
61086
61087 +#define vmalloc(x) \
61088 +({ \
61089 + void *___retval; \
61090 + intoverflow_t ___x = (intoverflow_t)x; \
61091 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61092 + ___retval = NULL; \
61093 + else \
61094 + ___retval = vmalloc((unsigned long)___x); \
61095 + ___retval; \
61096 +})
61097 +
61098 +#define vzalloc(x) \
61099 +({ \
61100 + void *___retval; \
61101 + intoverflow_t ___x = (intoverflow_t)x; \
61102 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61103 + ___retval = NULL; \
61104 + else \
61105 + ___retval = vzalloc((unsigned long)___x); \
61106 + ___retval; \
61107 +})
61108 +
61109 +#define __vmalloc(x, y, z) \
61110 +({ \
61111 + void *___retval; \
61112 + intoverflow_t ___x = (intoverflow_t)x; \
61113 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61114 + ___retval = NULL; \
61115 + else \
61116 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61117 + ___retval; \
61118 +})
61119 +
61120 +#define vmalloc_user(x) \
61121 +({ \
61122 + void *___retval; \
61123 + intoverflow_t ___x = (intoverflow_t)x; \
61124 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61125 + ___retval = NULL; \
61126 + else \
61127 + ___retval = vmalloc_user((unsigned long)___x); \
61128 + ___retval; \
61129 +})
61130 +
61131 +#define vmalloc_exec(x) \
61132 +({ \
61133 + void *___retval; \
61134 + intoverflow_t ___x = (intoverflow_t)x; \
61135 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61136 + ___retval = NULL; \
61137 + else \
61138 + ___retval = vmalloc_exec((unsigned long)___x); \
61139 + ___retval; \
61140 +})
61141 +
61142 +#define vmalloc_node(x, y) \
61143 +({ \
61144 + void *___retval; \
61145 + intoverflow_t ___x = (intoverflow_t)x; \
61146 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61147 + ___retval = NULL; \
61148 + else \
61149 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61150 + ___retval; \
61151 +})
61152 +
61153 +#define vzalloc_node(x, y) \
61154 +({ \
61155 + void *___retval; \
61156 + intoverflow_t ___x = (intoverflow_t)x; \
61157 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61158 + ___retval = NULL; \
61159 + else \
61160 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61161 + ___retval; \
61162 +})
61163 +
61164 +#define vmalloc_32(x) \
61165 +({ \
61166 + void *___retval; \
61167 + intoverflow_t ___x = (intoverflow_t)x; \
61168 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61169 + ___retval = NULL; \
61170 + else \
61171 + ___retval = vmalloc_32((unsigned long)___x); \
61172 + ___retval; \
61173 +})
61174 +
61175 +#define vmalloc_32_user(x) \
61176 +({ \
61177 +void *___retval; \
61178 + intoverflow_t ___x = (intoverflow_t)x; \
61179 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61180 + ___retval = NULL; \
61181 + else \
61182 + ___retval = vmalloc_32_user((unsigned long)___x);\
61183 + ___retval; \
61184 +})
61185 +
61186 #endif /* _LINUX_VMALLOC_H */
61187 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61188 index 65efb92..137adbb 100644
61189 --- a/include/linux/vmstat.h
61190 +++ b/include/linux/vmstat.h
61191 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61192 /*
61193 * Zone based page accounting with per cpu differentials.
61194 */
61195 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61196 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61197
61198 static inline void zone_page_state_add(long x, struct zone *zone,
61199 enum zone_stat_item item)
61200 {
61201 - atomic_long_add(x, &zone->vm_stat[item]);
61202 - atomic_long_add(x, &vm_stat[item]);
61203 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61204 + atomic_long_add_unchecked(x, &vm_stat[item]);
61205 }
61206
61207 static inline unsigned long global_page_state(enum zone_stat_item item)
61208 {
61209 - long x = atomic_long_read(&vm_stat[item]);
61210 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61211 #ifdef CONFIG_SMP
61212 if (x < 0)
61213 x = 0;
61214 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61215 static inline unsigned long zone_page_state(struct zone *zone,
61216 enum zone_stat_item item)
61217 {
61218 - long x = atomic_long_read(&zone->vm_stat[item]);
61219 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61220 #ifdef CONFIG_SMP
61221 if (x < 0)
61222 x = 0;
61223 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61224 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61225 enum zone_stat_item item)
61226 {
61227 - long x = atomic_long_read(&zone->vm_stat[item]);
61228 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61229
61230 #ifdef CONFIG_SMP
61231 int cpu;
61232 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61233
61234 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61235 {
61236 - atomic_long_inc(&zone->vm_stat[item]);
61237 - atomic_long_inc(&vm_stat[item]);
61238 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61239 + atomic_long_inc_unchecked(&vm_stat[item]);
61240 }
61241
61242 static inline void __inc_zone_page_state(struct page *page,
61243 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61244
61245 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61246 {
61247 - atomic_long_dec(&zone->vm_stat[item]);
61248 - atomic_long_dec(&vm_stat[item]);
61249 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61250 + atomic_long_dec_unchecked(&vm_stat[item]);
61251 }
61252
61253 static inline void __dec_zone_page_state(struct page *page,
61254 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61255 index e5d1220..ef6e406 100644
61256 --- a/include/linux/xattr.h
61257 +++ b/include/linux/xattr.h
61258 @@ -57,6 +57,11 @@
61259 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61260 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61261
61262 +/* User namespace */
61263 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61264 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61265 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61266 +
61267 #ifdef __KERNEL__
61268
61269 #include <linux/types.h>
61270 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61271 index 4aeff96..b378cdc 100644
61272 --- a/include/media/saa7146_vv.h
61273 +++ b/include/media/saa7146_vv.h
61274 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61275 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61276
61277 /* the extension can override this */
61278 - struct v4l2_ioctl_ops ops;
61279 + v4l2_ioctl_ops_no_const ops;
61280 /* pointer to the saa7146 core ops */
61281 const struct v4l2_ioctl_ops *core_ops;
61282
61283 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61284 index c7c40f1..4f01585 100644
61285 --- a/include/media/v4l2-dev.h
61286 +++ b/include/media/v4l2-dev.h
61287 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61288
61289
61290 struct v4l2_file_operations {
61291 - struct module *owner;
61292 + struct module * const owner;
61293 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61294 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61295 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61296 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61297 int (*open) (struct file *);
61298 int (*release) (struct file *);
61299 };
61300 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61301
61302 /*
61303 * Newer version of video_device, handled by videodev2.c
61304 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61305 index 4d1c74a..65e1221 100644
61306 --- a/include/media/v4l2-ioctl.h
61307 +++ b/include/media/v4l2-ioctl.h
61308 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61309 long (*vidioc_default) (struct file *file, void *fh,
61310 bool valid_prio, int cmd, void *arg);
61311 };
61312 -
61313 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61314
61315 /* v4l debugging and diagnostics */
61316
61317 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61318 index 8d55251..dfe5b0a 100644
61319 --- a/include/net/caif/caif_hsi.h
61320 +++ b/include/net/caif/caif_hsi.h
61321 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61322 void (*rx_done_cb) (struct cfhsi_drv *drv);
61323 void (*wake_up_cb) (struct cfhsi_drv *drv);
61324 void (*wake_down_cb) (struct cfhsi_drv *drv);
61325 -};
61326 +} __no_const;
61327
61328 /* Structure implemented by HSI device. */
61329 struct cfhsi_dev {
61330 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61331 index 9e5425b..8136ffc 100644
61332 --- a/include/net/caif/cfctrl.h
61333 +++ b/include/net/caif/cfctrl.h
61334 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61335 void (*radioset_rsp)(void);
61336 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61337 struct cflayer *client_layer);
61338 -};
61339 +} __no_const;
61340
61341 /* Link Setup Parameters for CAIF-Links. */
61342 struct cfctrl_link_param {
61343 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61344 struct cfctrl {
61345 struct cfsrvl serv;
61346 struct cfctrl_rsp res;
61347 - atomic_t req_seq_no;
61348 - atomic_t rsp_seq_no;
61349 + atomic_unchecked_t req_seq_no;
61350 + atomic_unchecked_t rsp_seq_no;
61351 struct list_head list;
61352 /* Protects from simultaneous access to first_req list */
61353 spinlock_t info_list_lock;
61354 diff --git a/include/net/flow.h b/include/net/flow.h
61355 index 57f15a7..0de26c6 100644
61356 --- a/include/net/flow.h
61357 +++ b/include/net/flow.h
61358 @@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61359
61360 extern void flow_cache_flush(void);
61361 extern void flow_cache_flush_deferred(void);
61362 -extern atomic_t flow_cache_genid;
61363 +extern atomic_unchecked_t flow_cache_genid;
61364
61365 #endif
61366 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61367 index e9ff3fc..9d3e5c7 100644
61368 --- a/include/net/inetpeer.h
61369 +++ b/include/net/inetpeer.h
61370 @@ -48,8 +48,8 @@ struct inet_peer {
61371 */
61372 union {
61373 struct {
61374 - atomic_t rid; /* Frag reception counter */
61375 - atomic_t ip_id_count; /* IP ID for the next packet */
61376 + atomic_unchecked_t rid; /* Frag reception counter */
61377 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61378 __u32 tcp_ts;
61379 __u32 tcp_ts_stamp;
61380 };
61381 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61382 more++;
61383 inet_peer_refcheck(p);
61384 do {
61385 - old = atomic_read(&p->ip_id_count);
61386 + old = atomic_read_unchecked(&p->ip_id_count);
61387 new = old + more;
61388 if (!new)
61389 new = 1;
61390 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61391 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61392 return new;
61393 }
61394
61395 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61396 index 10422ef..662570f 100644
61397 --- a/include/net/ip_fib.h
61398 +++ b/include/net/ip_fib.h
61399 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61400
61401 #define FIB_RES_SADDR(net, res) \
61402 ((FIB_RES_NH(res).nh_saddr_genid == \
61403 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61404 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61405 FIB_RES_NH(res).nh_saddr : \
61406 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61407 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61408 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61409 index e5a7b9a..f4fc44b 100644
61410 --- a/include/net/ip_vs.h
61411 +++ b/include/net/ip_vs.h
61412 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61413 struct ip_vs_conn *control; /* Master control connection */
61414 atomic_t n_control; /* Number of controlled ones */
61415 struct ip_vs_dest *dest; /* real server */
61416 - atomic_t in_pkts; /* incoming packet counter */
61417 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61418
61419 /* packet transmitter for different forwarding methods. If it
61420 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61421 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61422 __be16 port; /* port number of the server */
61423 union nf_inet_addr addr; /* IP address of the server */
61424 volatile unsigned flags; /* dest status flags */
61425 - atomic_t conn_flags; /* flags to copy to conn */
61426 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61427 atomic_t weight; /* server weight */
61428
61429 atomic_t refcnt; /* reference counter */
61430 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61431 index 69b610a..fe3962c 100644
61432 --- a/include/net/irda/ircomm_core.h
61433 +++ b/include/net/irda/ircomm_core.h
61434 @@ -51,7 +51,7 @@ typedef struct {
61435 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61436 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61437 struct ircomm_info *);
61438 -} call_t;
61439 +} __no_const call_t;
61440
61441 struct ircomm_cb {
61442 irda_queue_t queue;
61443 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61444 index 59ba38bc..d515662 100644
61445 --- a/include/net/irda/ircomm_tty.h
61446 +++ b/include/net/irda/ircomm_tty.h
61447 @@ -35,6 +35,7 @@
61448 #include <linux/termios.h>
61449 #include <linux/timer.h>
61450 #include <linux/tty.h> /* struct tty_struct */
61451 +#include <asm/local.h>
61452
61453 #include <net/irda/irias_object.h>
61454 #include <net/irda/ircomm_core.h>
61455 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61456 unsigned short close_delay;
61457 unsigned short closing_wait; /* time to wait before closing */
61458
61459 - int open_count;
61460 - int blocked_open; /* # of blocked opens */
61461 + local_t open_count;
61462 + local_t blocked_open; /* # of blocked opens */
61463
61464 /* Protect concurent access to :
61465 * o self->open_count
61466 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61467 index f2419cf..473679f 100644
61468 --- a/include/net/iucv/af_iucv.h
61469 +++ b/include/net/iucv/af_iucv.h
61470 @@ -139,7 +139,7 @@ struct iucv_sock {
61471 struct iucv_sock_list {
61472 struct hlist_head head;
61473 rwlock_t lock;
61474 - atomic_t autobind_name;
61475 + atomic_unchecked_t autobind_name;
61476 };
61477
61478 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61479 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61480 index 2720884..3aa5c25 100644
61481 --- a/include/net/neighbour.h
61482 +++ b/include/net/neighbour.h
61483 @@ -122,7 +122,7 @@ struct neigh_ops {
61484 void (*error_report)(struct neighbour *, struct sk_buff *);
61485 int (*output)(struct neighbour *, struct sk_buff *);
61486 int (*connected_output)(struct neighbour *, struct sk_buff *);
61487 -};
61488 +} __do_const;
61489
61490 struct pneigh_entry {
61491 struct pneigh_entry *next;
61492 diff --git a/include/net/netlink.h b/include/net/netlink.h
61493 index cb1f350..3279d2c 100644
61494 --- a/include/net/netlink.h
61495 +++ b/include/net/netlink.h
61496 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61497 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61498 {
61499 if (mark)
61500 - skb_trim(skb, (unsigned char *) mark - skb->data);
61501 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61502 }
61503
61504 /**
61505 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61506 index d786b4f..4c3dd41 100644
61507 --- a/include/net/netns/ipv4.h
61508 +++ b/include/net/netns/ipv4.h
61509 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61510
61511 unsigned int sysctl_ping_group_range[2];
61512
61513 - atomic_t rt_genid;
61514 - atomic_t dev_addr_genid;
61515 + atomic_unchecked_t rt_genid;
61516 + atomic_unchecked_t dev_addr_genid;
61517
61518 #ifdef CONFIG_IP_MROUTE
61519 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61520 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61521 index 6a72a58..e6a127d 100644
61522 --- a/include/net/sctp/sctp.h
61523 +++ b/include/net/sctp/sctp.h
61524 @@ -318,9 +318,9 @@ do { \
61525
61526 #else /* SCTP_DEBUG */
61527
61528 -#define SCTP_DEBUG_PRINTK(whatever...)
61529 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61530 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61531 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61532 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61533 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61534 #define SCTP_ENABLE_DEBUG
61535 #define SCTP_DISABLE_DEBUG
61536 #define SCTP_ASSERT(expr, str, func)
61537 diff --git a/include/net/sock.h b/include/net/sock.h
61538 index 32e3937..87a1dbc 100644
61539 --- a/include/net/sock.h
61540 +++ b/include/net/sock.h
61541 @@ -277,7 +277,7 @@ struct sock {
61542 #ifdef CONFIG_RPS
61543 __u32 sk_rxhash;
61544 #endif
61545 - atomic_t sk_drops;
61546 + atomic_unchecked_t sk_drops;
61547 int sk_rcvbuf;
61548
61549 struct sk_filter __rcu *sk_filter;
61550 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61551 }
61552
61553 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61554 - char __user *from, char *to,
61555 + char __user *from, unsigned char *to,
61556 int copy, int offset)
61557 {
61558 if (skb->ip_summed == CHECKSUM_NONE) {
61559 diff --git a/include/net/tcp.h b/include/net/tcp.h
61560 index bb18c4d..bb87972 100644
61561 --- a/include/net/tcp.h
61562 +++ b/include/net/tcp.h
61563 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61564 char *name;
61565 sa_family_t family;
61566 const struct file_operations *seq_fops;
61567 - struct seq_operations seq_ops;
61568 + seq_operations_no_const seq_ops;
61569 };
61570
61571 struct tcp_iter_state {
61572 diff --git a/include/net/udp.h b/include/net/udp.h
61573 index 3b285f4..0219639 100644
61574 --- a/include/net/udp.h
61575 +++ b/include/net/udp.h
61576 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61577 sa_family_t family;
61578 struct udp_table *udp_table;
61579 const struct file_operations *seq_fops;
61580 - struct seq_operations seq_ops;
61581 + seq_operations_no_const seq_ops;
61582 };
61583
61584 struct udp_iter_state {
61585 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61586 index b203e14..1df3991 100644
61587 --- a/include/net/xfrm.h
61588 +++ b/include/net/xfrm.h
61589 @@ -505,7 +505,7 @@ struct xfrm_policy {
61590 struct timer_list timer;
61591
61592 struct flow_cache_object flo;
61593 - atomic_t genid;
61594 + atomic_unchecked_t genid;
61595 u32 priority;
61596 u32 index;
61597 struct xfrm_mark mark;
61598 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61599 index 1a046b1..ee0bef0 100644
61600 --- a/include/rdma/iw_cm.h
61601 +++ b/include/rdma/iw_cm.h
61602 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
61603 int backlog);
61604
61605 int (*destroy_listen)(struct iw_cm_id *cm_id);
61606 -};
61607 +} __no_const;
61608
61609 /**
61610 * iw_create_cm_id - Create an IW CM identifier.
61611 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61612 index 5d1a758..1dbf795 100644
61613 --- a/include/scsi/libfc.h
61614 +++ b/include/scsi/libfc.h
61615 @@ -748,6 +748,7 @@ struct libfc_function_template {
61616 */
61617 void (*disc_stop_final) (struct fc_lport *);
61618 };
61619 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61620
61621 /**
61622 * struct fc_disc - Discovery context
61623 @@ -851,7 +852,7 @@ struct fc_lport {
61624 struct fc_vport *vport;
61625
61626 /* Operational Information */
61627 - struct libfc_function_template tt;
61628 + libfc_function_template_no_const tt;
61629 u8 link_up;
61630 u8 qfull;
61631 enum fc_lport_state state;
61632 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61633 index 5591ed5..13eb457 100644
61634 --- a/include/scsi/scsi_device.h
61635 +++ b/include/scsi/scsi_device.h
61636 @@ -161,9 +161,9 @@ struct scsi_device {
61637 unsigned int max_device_blocked; /* what device_blocked counts down from */
61638 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61639
61640 - atomic_t iorequest_cnt;
61641 - atomic_t iodone_cnt;
61642 - atomic_t ioerr_cnt;
61643 + atomic_unchecked_t iorequest_cnt;
61644 + atomic_unchecked_t iodone_cnt;
61645 + atomic_unchecked_t ioerr_cnt;
61646
61647 struct device sdev_gendev,
61648 sdev_dev;
61649 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61650 index 2a65167..91e01f8 100644
61651 --- a/include/scsi/scsi_transport_fc.h
61652 +++ b/include/scsi/scsi_transport_fc.h
61653 @@ -711,7 +711,7 @@ struct fc_function_template {
61654 unsigned long show_host_system_hostname:1;
61655
61656 unsigned long disable_target_scan:1;
61657 -};
61658 +} __do_const;
61659
61660
61661 /**
61662 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61663 index 030b87c..98a6954 100644
61664 --- a/include/sound/ak4xxx-adda.h
61665 +++ b/include/sound/ak4xxx-adda.h
61666 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61667 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61668 unsigned char val);
61669 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61670 -};
61671 +} __no_const;
61672
61673 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61674
61675 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61676 index 8c05e47..2b5df97 100644
61677 --- a/include/sound/hwdep.h
61678 +++ b/include/sound/hwdep.h
61679 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61680 struct snd_hwdep_dsp_status *status);
61681 int (*dsp_load)(struct snd_hwdep *hw,
61682 struct snd_hwdep_dsp_image *image);
61683 -};
61684 +} __no_const;
61685
61686 struct snd_hwdep {
61687 struct snd_card *card;
61688 diff --git a/include/sound/info.h b/include/sound/info.h
61689 index 5492cc4..1a65278 100644
61690 --- a/include/sound/info.h
61691 +++ b/include/sound/info.h
61692 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61693 struct snd_info_buffer *buffer);
61694 void (*write)(struct snd_info_entry *entry,
61695 struct snd_info_buffer *buffer);
61696 -};
61697 +} __no_const;
61698
61699 struct snd_info_entry_ops {
61700 int (*open)(struct snd_info_entry *entry,
61701 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61702 index 0cf91b2..b70cae4 100644
61703 --- a/include/sound/pcm.h
61704 +++ b/include/sound/pcm.h
61705 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
61706 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61707 int (*ack)(struct snd_pcm_substream *substream);
61708 };
61709 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61710
61711 /*
61712 *
61713 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61714 index af1b49e..a5d55a5 100644
61715 --- a/include/sound/sb16_csp.h
61716 +++ b/include/sound/sb16_csp.h
61717 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61718 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61719 int (*csp_stop) (struct snd_sb_csp * p);
61720 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61721 -};
61722 +} __no_const;
61723
61724 /*
61725 * CSP private data
61726 diff --git a/include/sound/soc.h b/include/sound/soc.h
61727 index 11cfb59..e3f93f4 100644
61728 --- a/include/sound/soc.h
61729 +++ b/include/sound/soc.h
61730 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61731 /* platform IO - used for platform DAPM */
61732 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61733 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61734 -};
61735 +} __do_const;
61736
61737 struct snd_soc_platform {
61738 const char *name;
61739 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61740 index 444cd6b..3327cc5 100644
61741 --- a/include/sound/ymfpci.h
61742 +++ b/include/sound/ymfpci.h
61743 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61744 spinlock_t reg_lock;
61745 spinlock_t voice_lock;
61746 wait_queue_head_t interrupt_sleep;
61747 - atomic_t interrupt_sleep_count;
61748 + atomic_unchecked_t interrupt_sleep_count;
61749 struct snd_info_entry *proc_entry;
61750 const struct firmware *dsp_microcode;
61751 const struct firmware *controller_microcode;
61752 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61753 index a79886c..b483af6 100644
61754 --- a/include/target/target_core_base.h
61755 +++ b/include/target/target_core_base.h
61756 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
61757 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61758 int (*t10_pr_register)(struct se_cmd *);
61759 int (*t10_pr_clear)(struct se_cmd *);
61760 -};
61761 +} __no_const;
61762
61763 struct t10_reservation {
61764 /* Reservation effects all target ports */
61765 @@ -465,8 +465,8 @@ struct se_cmd {
61766 atomic_t t_se_count;
61767 atomic_t t_task_cdbs_left;
61768 atomic_t t_task_cdbs_ex_left;
61769 - atomic_t t_task_cdbs_sent;
61770 - atomic_t t_transport_aborted;
61771 + atomic_unchecked_t t_task_cdbs_sent;
61772 + atomic_unchecked_t t_transport_aborted;
61773 atomic_t t_transport_active;
61774 atomic_t t_transport_complete;
61775 atomic_t t_transport_queue_active;
61776 @@ -704,7 +704,7 @@ struct se_device {
61777 /* Active commands on this virtual SE device */
61778 atomic_t simple_cmds;
61779 atomic_t depth_left;
61780 - atomic_t dev_ordered_id;
61781 + atomic_unchecked_t dev_ordered_id;
61782 atomic_t execute_tasks;
61783 atomic_t dev_ordered_sync;
61784 atomic_t dev_qf_count;
61785 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61786 index 1c09820..7f5ec79 100644
61787 --- a/include/trace/events/irq.h
61788 +++ b/include/trace/events/irq.h
61789 @@ -36,7 +36,7 @@ struct softirq_action;
61790 */
61791 TRACE_EVENT(irq_handler_entry,
61792
61793 - TP_PROTO(int irq, struct irqaction *action),
61794 + TP_PROTO(int irq, const struct irqaction *action),
61795
61796 TP_ARGS(irq, action),
61797
61798 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61799 */
61800 TRACE_EVENT(irq_handler_exit,
61801
61802 - TP_PROTO(int irq, struct irqaction *action, int ret),
61803 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61804
61805 TP_ARGS(irq, action, ret),
61806
61807 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61808 index c41f308..6918de3 100644
61809 --- a/include/video/udlfb.h
61810 +++ b/include/video/udlfb.h
61811 @@ -52,10 +52,10 @@ struct dlfb_data {
61812 u32 pseudo_palette[256];
61813 int blank_mode; /*one of FB_BLANK_ */
61814 /* blit-only rendering path metrics, exposed through sysfs */
61815 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61816 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61817 - atomic_t bytes_sent; /* to usb, after compression including overhead */
61818 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61819 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61820 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61821 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61822 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61823 };
61824
61825 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61826 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
61827 index 0993a22..32ba2fe 100644
61828 --- a/include/video/uvesafb.h
61829 +++ b/include/video/uvesafb.h
61830 @@ -177,6 +177,7 @@ struct uvesafb_par {
61831 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61832 u8 pmi_setpal; /* PMI for palette changes */
61833 u16 *pmi_base; /* protected mode interface location */
61834 + u8 *pmi_code; /* protected mode code location */
61835 void *pmi_start;
61836 void *pmi_pal;
61837 u8 *vbe_state_orig; /*
61838 diff --git a/init/Kconfig b/init/Kconfig
61839 index 43298f9..2f56c12 100644
61840 --- a/init/Kconfig
61841 +++ b/init/Kconfig
61842 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
61843
61844 config COMPAT_BRK
61845 bool "Disable heap randomization"
61846 - default y
61847 + default n
61848 help
61849 Randomizing heap placement makes heap exploits harder, but it
61850 also breaks ancient binaries (including anything libc5 based).
61851 diff --git a/init/do_mounts.c b/init/do_mounts.c
61852 index db6e5ee..7677ff7 100644
61853 --- a/init/do_mounts.c
61854 +++ b/init/do_mounts.c
61855 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
61856
61857 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61858 {
61859 - int err = sys_mount(name, "/root", fs, flags, data);
61860 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61861 if (err)
61862 return err;
61863
61864 - sys_chdir((const char __user __force *)"/root");
61865 + sys_chdir((const char __force_user*)"/root");
61866 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61867 printk(KERN_INFO
61868 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61869 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
61870 va_start(args, fmt);
61871 vsprintf(buf, fmt, args);
61872 va_end(args);
61873 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61874 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61875 if (fd >= 0) {
61876 sys_ioctl(fd, FDEJECT, 0);
61877 sys_close(fd);
61878 }
61879 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61880 - fd = sys_open("/dev/console", O_RDWR, 0);
61881 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61882 if (fd >= 0) {
61883 sys_ioctl(fd, TCGETS, (long)&termios);
61884 termios.c_lflag &= ~ICANON;
61885 sys_ioctl(fd, TCSETSF, (long)&termios);
61886 - sys_read(fd, &c, 1);
61887 + sys_read(fd, (char __user *)&c, 1);
61888 termios.c_lflag |= ICANON;
61889 sys_ioctl(fd, TCSETSF, (long)&termios);
61890 sys_close(fd);
61891 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
61892 mount_root();
61893 out:
61894 devtmpfs_mount("dev");
61895 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61896 - sys_chroot((const char __user __force *)".");
61897 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61898 + sys_chroot((const char __force_user *)".");
61899 }
61900 diff --git a/init/do_mounts.h b/init/do_mounts.h
61901 index f5b978a..69dbfe8 100644
61902 --- a/init/do_mounts.h
61903 +++ b/init/do_mounts.h
61904 @@ -15,15 +15,15 @@ extern int root_mountflags;
61905
61906 static inline int create_dev(char *name, dev_t dev)
61907 {
61908 - sys_unlink(name);
61909 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61910 + sys_unlink((char __force_user *)name);
61911 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61912 }
61913
61914 #if BITS_PER_LONG == 32
61915 static inline u32 bstat(char *name)
61916 {
61917 struct stat64 stat;
61918 - if (sys_stat64(name, &stat) != 0)
61919 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61920 return 0;
61921 if (!S_ISBLK(stat.st_mode))
61922 return 0;
61923 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61924 static inline u32 bstat(char *name)
61925 {
61926 struct stat stat;
61927 - if (sys_newstat(name, &stat) != 0)
61928 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61929 return 0;
61930 if (!S_ISBLK(stat.st_mode))
61931 return 0;
61932 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
61933 index 3098a38..253064e 100644
61934 --- a/init/do_mounts_initrd.c
61935 +++ b/init/do_mounts_initrd.c
61936 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61937 create_dev("/dev/root.old", Root_RAM0);
61938 /* mount initrd on rootfs' /root */
61939 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61940 - sys_mkdir("/old", 0700);
61941 - root_fd = sys_open("/", 0, 0);
61942 - old_fd = sys_open("/old", 0, 0);
61943 + sys_mkdir((const char __force_user *)"/old", 0700);
61944 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
61945 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61946 /* move initrd over / and chdir/chroot in initrd root */
61947 - sys_chdir("/root");
61948 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61949 - sys_chroot(".");
61950 + sys_chdir((const char __force_user *)"/root");
61951 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61952 + sys_chroot((const char __force_user *)".");
61953
61954 /*
61955 * In case that a resume from disk is carried out by linuxrc or one of
61956 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61957
61958 /* move initrd to rootfs' /old */
61959 sys_fchdir(old_fd);
61960 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61961 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61962 /* switch root and cwd back to / of rootfs */
61963 sys_fchdir(root_fd);
61964 - sys_chroot(".");
61965 + sys_chroot((const char __force_user *)".");
61966 sys_close(old_fd);
61967 sys_close(root_fd);
61968
61969 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61970 - sys_chdir("/old");
61971 + sys_chdir((const char __force_user *)"/old");
61972 return;
61973 }
61974
61975 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61976 mount_root();
61977
61978 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61979 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61980 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61981 if (!error)
61982 printk("okay\n");
61983 else {
61984 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61985 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61986 if (error == -ENOENT)
61987 printk("/initrd does not exist. Ignored.\n");
61988 else
61989 printk("failed\n");
61990 printk(KERN_NOTICE "Unmounting old root\n");
61991 - sys_umount("/old", MNT_DETACH);
61992 + sys_umount((char __force_user *)"/old", MNT_DETACH);
61993 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61994 if (fd < 0) {
61995 error = fd;
61996 @@ -116,11 +116,11 @@ int __init initrd_load(void)
61997 * mounted in the normal path.
61998 */
61999 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62000 - sys_unlink("/initrd.image");
62001 + sys_unlink((const char __force_user *)"/initrd.image");
62002 handle_initrd();
62003 return 1;
62004 }
62005 }
62006 - sys_unlink("/initrd.image");
62007 + sys_unlink((const char __force_user *)"/initrd.image");
62008 return 0;
62009 }
62010 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62011 index 32c4799..c27ee74 100644
62012 --- a/init/do_mounts_md.c
62013 +++ b/init/do_mounts_md.c
62014 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62015 partitioned ? "_d" : "", minor,
62016 md_setup_args[ent].device_names);
62017
62018 - fd = sys_open(name, 0, 0);
62019 + fd = sys_open((char __force_user *)name, 0, 0);
62020 if (fd < 0) {
62021 printk(KERN_ERR "md: open failed - cannot start "
62022 "array %s\n", name);
62023 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62024 * array without it
62025 */
62026 sys_close(fd);
62027 - fd = sys_open(name, 0, 0);
62028 + fd = sys_open((char __force_user *)name, 0, 0);
62029 sys_ioctl(fd, BLKRRPART, 0);
62030 }
62031 sys_close(fd);
62032 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62033
62034 wait_for_device_probe();
62035
62036 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62037 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62038 if (fd >= 0) {
62039 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62040 sys_close(fd);
62041 diff --git a/init/initramfs.c b/init/initramfs.c
62042 index 2531811..040d4d4 100644
62043 --- a/init/initramfs.c
62044 +++ b/init/initramfs.c
62045 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62046 }
62047 }
62048
62049 -static long __init do_utime(char __user *filename, time_t mtime)
62050 +static long __init do_utime(__force char __user *filename, time_t mtime)
62051 {
62052 struct timespec t[2];
62053
62054 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62055 struct dir_entry *de, *tmp;
62056 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62057 list_del(&de->list);
62058 - do_utime(de->name, de->mtime);
62059 + do_utime((char __force_user *)de->name, de->mtime);
62060 kfree(de->name);
62061 kfree(de);
62062 }
62063 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62064 if (nlink >= 2) {
62065 char *old = find_link(major, minor, ino, mode, collected);
62066 if (old)
62067 - return (sys_link(old, collected) < 0) ? -1 : 1;
62068 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62069 }
62070 return 0;
62071 }
62072 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62073 {
62074 struct stat st;
62075
62076 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62077 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62078 if (S_ISDIR(st.st_mode))
62079 - sys_rmdir(path);
62080 + sys_rmdir((char __force_user *)path);
62081 else
62082 - sys_unlink(path);
62083 + sys_unlink((char __force_user *)path);
62084 }
62085 }
62086
62087 @@ -305,7 +305,7 @@ static int __init do_name(void)
62088 int openflags = O_WRONLY|O_CREAT;
62089 if (ml != 1)
62090 openflags |= O_TRUNC;
62091 - wfd = sys_open(collected, openflags, mode);
62092 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62093
62094 if (wfd >= 0) {
62095 sys_fchown(wfd, uid, gid);
62096 @@ -317,17 +317,17 @@ static int __init do_name(void)
62097 }
62098 }
62099 } else if (S_ISDIR(mode)) {
62100 - sys_mkdir(collected, mode);
62101 - sys_chown(collected, uid, gid);
62102 - sys_chmod(collected, mode);
62103 + sys_mkdir((char __force_user *)collected, mode);
62104 + sys_chown((char __force_user *)collected, uid, gid);
62105 + sys_chmod((char __force_user *)collected, mode);
62106 dir_add(collected, mtime);
62107 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62108 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62109 if (maybe_link() == 0) {
62110 - sys_mknod(collected, mode, rdev);
62111 - sys_chown(collected, uid, gid);
62112 - sys_chmod(collected, mode);
62113 - do_utime(collected, mtime);
62114 + sys_mknod((char __force_user *)collected, mode, rdev);
62115 + sys_chown((char __force_user *)collected, uid, gid);
62116 + sys_chmod((char __force_user *)collected, mode);
62117 + do_utime((char __force_user *)collected, mtime);
62118 }
62119 }
62120 return 0;
62121 @@ -336,15 +336,15 @@ static int __init do_name(void)
62122 static int __init do_copy(void)
62123 {
62124 if (count >= body_len) {
62125 - sys_write(wfd, victim, body_len);
62126 + sys_write(wfd, (char __force_user *)victim, body_len);
62127 sys_close(wfd);
62128 - do_utime(vcollected, mtime);
62129 + do_utime((char __force_user *)vcollected, mtime);
62130 kfree(vcollected);
62131 eat(body_len);
62132 state = SkipIt;
62133 return 0;
62134 } else {
62135 - sys_write(wfd, victim, count);
62136 + sys_write(wfd, (char __force_user *)victim, count);
62137 body_len -= count;
62138 eat(count);
62139 return 1;
62140 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62141 {
62142 collected[N_ALIGN(name_len) + body_len] = '\0';
62143 clean_path(collected, 0);
62144 - sys_symlink(collected + N_ALIGN(name_len), collected);
62145 - sys_lchown(collected, uid, gid);
62146 - do_utime(collected, mtime);
62147 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62148 + sys_lchown((char __force_user *)collected, uid, gid);
62149 + do_utime((char __force_user *)collected, mtime);
62150 state = SkipIt;
62151 next_state = Reset;
62152 return 0;
62153 diff --git a/init/main.c b/init/main.c
62154 index 217ed23..32e5731 100644
62155 --- a/init/main.c
62156 +++ b/init/main.c
62157 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62158 extern void tc_init(void);
62159 #endif
62160
62161 +extern void grsecurity_init(void);
62162 +
62163 /*
62164 * Debug helper: via this flag we know that we are in 'early bootup code'
62165 * where only the boot processor is running with IRQ disabled. This means
62166 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62167
62168 __setup("reset_devices", set_reset_devices);
62169
62170 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62171 +extern char pax_enter_kernel_user[];
62172 +extern char pax_exit_kernel_user[];
62173 +extern pgdval_t clone_pgd_mask;
62174 +#endif
62175 +
62176 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62177 +static int __init setup_pax_nouderef(char *str)
62178 +{
62179 +#ifdef CONFIG_X86_32
62180 + unsigned int cpu;
62181 + struct desc_struct *gdt;
62182 +
62183 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62184 + gdt = get_cpu_gdt_table(cpu);
62185 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62186 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62187 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62188 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62189 + }
62190 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62191 +#else
62192 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62193 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62194 + clone_pgd_mask = ~(pgdval_t)0UL;
62195 +#endif
62196 +
62197 + return 0;
62198 +}
62199 +early_param("pax_nouderef", setup_pax_nouderef);
62200 +#endif
62201 +
62202 +#ifdef CONFIG_PAX_SOFTMODE
62203 +int pax_softmode;
62204 +
62205 +static int __init setup_pax_softmode(char *str)
62206 +{
62207 + get_option(&str, &pax_softmode);
62208 + return 1;
62209 +}
62210 +__setup("pax_softmode=", setup_pax_softmode);
62211 +#endif
62212 +
62213 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62214 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62215 static const char *panic_later, *panic_param;
62216 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62217 {
62218 int count = preempt_count();
62219 int ret;
62220 + const char *msg1 = "", *msg2 = "";
62221
62222 if (initcall_debug)
62223 ret = do_one_initcall_debug(fn);
62224 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62225 sprintf(msgbuf, "error code %d ", ret);
62226
62227 if (preempt_count() != count) {
62228 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62229 + msg1 = " preemption imbalance";
62230 preempt_count() = count;
62231 }
62232 if (irqs_disabled()) {
62233 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62234 + msg2 = " disabled interrupts";
62235 local_irq_enable();
62236 }
62237 - if (msgbuf[0]) {
62238 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62239 + if (msgbuf[0] || *msg1 || *msg2) {
62240 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62241 }
62242
62243 return ret;
62244 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62245 do_basic_setup();
62246
62247 /* Open the /dev/console on the rootfs, this should never fail */
62248 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62249 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62250 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62251
62252 (void) sys_dup(0);
62253 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62254 if (!ramdisk_execute_command)
62255 ramdisk_execute_command = "/init";
62256
62257 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62258 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62259 ramdisk_execute_command = NULL;
62260 prepare_namespace();
62261 }
62262
62263 + grsecurity_init();
62264 +
62265 /*
62266 * Ok, we have completed the initial bootup, and
62267 * we're essentially up and running. Get rid of the
62268 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62269 index 5b4293d..f179875 100644
62270 --- a/ipc/mqueue.c
62271 +++ b/ipc/mqueue.c
62272 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62273 mq_bytes = (mq_msg_tblsz +
62274 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62275
62276 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62277 spin_lock(&mq_lock);
62278 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62279 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62280 diff --git a/ipc/msg.c b/ipc/msg.c
62281 index 7385de2..a8180e0 100644
62282 --- a/ipc/msg.c
62283 +++ b/ipc/msg.c
62284 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62285 return security_msg_queue_associate(msq, msgflg);
62286 }
62287
62288 +static struct ipc_ops msg_ops = {
62289 + .getnew = newque,
62290 + .associate = msg_security,
62291 + .more_checks = NULL
62292 +};
62293 +
62294 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62295 {
62296 struct ipc_namespace *ns;
62297 - struct ipc_ops msg_ops;
62298 struct ipc_params msg_params;
62299
62300 ns = current->nsproxy->ipc_ns;
62301
62302 - msg_ops.getnew = newque;
62303 - msg_ops.associate = msg_security;
62304 - msg_ops.more_checks = NULL;
62305 -
62306 msg_params.key = key;
62307 msg_params.flg = msgflg;
62308
62309 diff --git a/ipc/sem.c b/ipc/sem.c
62310 index 5215a81..cfc0cac 100644
62311 --- a/ipc/sem.c
62312 +++ b/ipc/sem.c
62313 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62314 return 0;
62315 }
62316
62317 +static struct ipc_ops sem_ops = {
62318 + .getnew = newary,
62319 + .associate = sem_security,
62320 + .more_checks = sem_more_checks
62321 +};
62322 +
62323 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62324 {
62325 struct ipc_namespace *ns;
62326 - struct ipc_ops sem_ops;
62327 struct ipc_params sem_params;
62328
62329 ns = current->nsproxy->ipc_ns;
62330 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62331 if (nsems < 0 || nsems > ns->sc_semmsl)
62332 return -EINVAL;
62333
62334 - sem_ops.getnew = newary;
62335 - sem_ops.associate = sem_security;
62336 - sem_ops.more_checks = sem_more_checks;
62337 -
62338 sem_params.key = key;
62339 sem_params.flg = semflg;
62340 sem_params.u.nsems = nsems;
62341 diff --git a/ipc/shm.c b/ipc/shm.c
62342 index b76be5b..859e750 100644
62343 --- a/ipc/shm.c
62344 +++ b/ipc/shm.c
62345 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62346 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62347 #endif
62348
62349 +#ifdef CONFIG_GRKERNSEC
62350 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62351 + const time_t shm_createtime, const uid_t cuid,
62352 + const int shmid);
62353 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62354 + const time_t shm_createtime);
62355 +#endif
62356 +
62357 void shm_init_ns(struct ipc_namespace *ns)
62358 {
62359 ns->shm_ctlmax = SHMMAX;
62360 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62361 shp->shm_lprid = 0;
62362 shp->shm_atim = shp->shm_dtim = 0;
62363 shp->shm_ctim = get_seconds();
62364 +#ifdef CONFIG_GRKERNSEC
62365 + {
62366 + struct timespec timeval;
62367 + do_posix_clock_monotonic_gettime(&timeval);
62368 +
62369 + shp->shm_createtime = timeval.tv_sec;
62370 + }
62371 +#endif
62372 shp->shm_segsz = size;
62373 shp->shm_nattch = 0;
62374 shp->shm_file = file;
62375 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62376 return 0;
62377 }
62378
62379 +static struct ipc_ops shm_ops = {
62380 + .getnew = newseg,
62381 + .associate = shm_security,
62382 + .more_checks = shm_more_checks
62383 +};
62384 +
62385 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62386 {
62387 struct ipc_namespace *ns;
62388 - struct ipc_ops shm_ops;
62389 struct ipc_params shm_params;
62390
62391 ns = current->nsproxy->ipc_ns;
62392
62393 - shm_ops.getnew = newseg;
62394 - shm_ops.associate = shm_security;
62395 - shm_ops.more_checks = shm_more_checks;
62396 -
62397 shm_params.key = key;
62398 shm_params.flg = shmflg;
62399 shm_params.u.size = size;
62400 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62401 f_mode = FMODE_READ | FMODE_WRITE;
62402 }
62403 if (shmflg & SHM_EXEC) {
62404 +
62405 +#ifdef CONFIG_PAX_MPROTECT
62406 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62407 + goto out;
62408 +#endif
62409 +
62410 prot |= PROT_EXEC;
62411 acc_mode |= S_IXUGO;
62412 }
62413 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62414 if (err)
62415 goto out_unlock;
62416
62417 +#ifdef CONFIG_GRKERNSEC
62418 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62419 + shp->shm_perm.cuid, shmid) ||
62420 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62421 + err = -EACCES;
62422 + goto out_unlock;
62423 + }
62424 +#endif
62425 +
62426 path = shp->shm_file->f_path;
62427 path_get(&path);
62428 shp->shm_nattch++;
62429 +#ifdef CONFIG_GRKERNSEC
62430 + shp->shm_lapid = current->pid;
62431 +#endif
62432 size = i_size_read(path.dentry->d_inode);
62433 shm_unlock(shp);
62434
62435 diff --git a/kernel/acct.c b/kernel/acct.c
62436 index fa7eb3d..7faf116 100644
62437 --- a/kernel/acct.c
62438 +++ b/kernel/acct.c
62439 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62440 */
62441 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62442 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62443 - file->f_op->write(file, (char *)&ac,
62444 + file->f_op->write(file, (char __force_user *)&ac,
62445 sizeof(acct_t), &file->f_pos);
62446 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62447 set_fs(fs);
62448 diff --git a/kernel/audit.c b/kernel/audit.c
62449 index 09fae26..ed71d5b 100644
62450 --- a/kernel/audit.c
62451 +++ b/kernel/audit.c
62452 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62453 3) suppressed due to audit_rate_limit
62454 4) suppressed due to audit_backlog_limit
62455 */
62456 -static atomic_t audit_lost = ATOMIC_INIT(0);
62457 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62458
62459 /* The netlink socket. */
62460 static struct sock *audit_sock;
62461 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62462 unsigned long now;
62463 int print;
62464
62465 - atomic_inc(&audit_lost);
62466 + atomic_inc_unchecked(&audit_lost);
62467
62468 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62469
62470 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62471 printk(KERN_WARNING
62472 "audit: audit_lost=%d audit_rate_limit=%d "
62473 "audit_backlog_limit=%d\n",
62474 - atomic_read(&audit_lost),
62475 + atomic_read_unchecked(&audit_lost),
62476 audit_rate_limit,
62477 audit_backlog_limit);
62478 audit_panic(message);
62479 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62480 status_set.pid = audit_pid;
62481 status_set.rate_limit = audit_rate_limit;
62482 status_set.backlog_limit = audit_backlog_limit;
62483 - status_set.lost = atomic_read(&audit_lost);
62484 + status_set.lost = atomic_read_unchecked(&audit_lost);
62485 status_set.backlog = skb_queue_len(&audit_skb_queue);
62486 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62487 &status_set, sizeof(status_set));
62488 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62489 avail = audit_expand(ab,
62490 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62491 if (!avail)
62492 - goto out;
62493 + goto out_va_end;
62494 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62495 }
62496 - va_end(args2);
62497 if (len > 0)
62498 skb_put(skb, len);
62499 +out_va_end:
62500 + va_end(args2);
62501 out:
62502 return;
62503 }
62504 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62505 index 47b7fc1..c003c33 100644
62506 --- a/kernel/auditsc.c
62507 +++ b/kernel/auditsc.c
62508 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62509 struct audit_buffer **ab,
62510 struct audit_aux_data_execve *axi)
62511 {
62512 - int i;
62513 - size_t len, len_sent = 0;
62514 + int i, len;
62515 + size_t len_sent = 0;
62516 const char __user *p;
62517 char *buf;
62518
62519 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62520 }
62521
62522 /* global counter which is incremented every time something logs in */
62523 -static atomic_t session_id = ATOMIC_INIT(0);
62524 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62525
62526 /**
62527 * audit_set_loginuid - set a task's audit_context loginuid
62528 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62529 */
62530 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62531 {
62532 - unsigned int sessionid = atomic_inc_return(&session_id);
62533 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62534 struct audit_context *context = task->audit_context;
62535
62536 if (context && context->in_syscall) {
62537 diff --git a/kernel/capability.c b/kernel/capability.c
62538 index b463871..fa3ea1f 100644
62539 --- a/kernel/capability.c
62540 +++ b/kernel/capability.c
62541 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62542 * before modification is attempted and the application
62543 * fails.
62544 */
62545 + if (tocopy > ARRAY_SIZE(kdata))
62546 + return -EFAULT;
62547 +
62548 if (copy_to_user(dataptr, kdata, tocopy
62549 * sizeof(struct __user_cap_data_struct))) {
62550 return -EFAULT;
62551 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62552 BUG();
62553 }
62554
62555 - if (security_capable(ns, current_cred(), cap) == 0) {
62556 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62557 current->flags |= PF_SUPERPRIV;
62558 return true;
62559 }
62560 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62561 }
62562 EXPORT_SYMBOL(ns_capable);
62563
62564 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
62565 +{
62566 + if (unlikely(!cap_valid(cap))) {
62567 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62568 + BUG();
62569 + }
62570 +
62571 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62572 + current->flags |= PF_SUPERPRIV;
62573 + return true;
62574 + }
62575 + return false;
62576 +}
62577 +EXPORT_SYMBOL(ns_capable_nolog);
62578 +
62579 +bool capable_nolog(int cap)
62580 +{
62581 + return ns_capable_nolog(&init_user_ns, cap);
62582 +}
62583 +EXPORT_SYMBOL(capable_nolog);
62584 +
62585 /**
62586 * task_ns_capable - Determine whether current task has a superior
62587 * capability targeted at a specific task's user namespace.
62588 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62589 }
62590 EXPORT_SYMBOL(task_ns_capable);
62591
62592 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
62593 +{
62594 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62595 +}
62596 +EXPORT_SYMBOL(task_ns_capable_nolog);
62597 +
62598 /**
62599 * nsown_capable - Check superior capability to one's own user_ns
62600 * @cap: The capability in question
62601 diff --git a/kernel/compat.c b/kernel/compat.c
62602 index f346ced..aa2b1f4 100644
62603 --- a/kernel/compat.c
62604 +++ b/kernel/compat.c
62605 @@ -13,6 +13,7 @@
62606
62607 #include <linux/linkage.h>
62608 #include <linux/compat.h>
62609 +#include <linux/module.h>
62610 #include <linux/errno.h>
62611 #include <linux/time.h>
62612 #include <linux/signal.h>
62613 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62614 mm_segment_t oldfs;
62615 long ret;
62616
62617 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62618 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62619 oldfs = get_fs();
62620 set_fs(KERNEL_DS);
62621 ret = hrtimer_nanosleep_restart(restart);
62622 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62623 oldfs = get_fs();
62624 set_fs(KERNEL_DS);
62625 ret = hrtimer_nanosleep(&tu,
62626 - rmtp ? (struct timespec __user *)&rmt : NULL,
62627 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
62628 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62629 set_fs(oldfs);
62630
62631 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62632 mm_segment_t old_fs = get_fs();
62633
62634 set_fs(KERNEL_DS);
62635 - ret = sys_sigpending((old_sigset_t __user *) &s);
62636 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
62637 set_fs(old_fs);
62638 if (ret == 0)
62639 ret = put_user(s, set);
62640 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62641 old_fs = get_fs();
62642 set_fs(KERNEL_DS);
62643 ret = sys_sigprocmask(how,
62644 - set ? (old_sigset_t __user *) &s : NULL,
62645 - oset ? (old_sigset_t __user *) &s : NULL);
62646 + set ? (old_sigset_t __force_user *) &s : NULL,
62647 + oset ? (old_sigset_t __force_user *) &s : NULL);
62648 set_fs(old_fs);
62649 if (ret == 0)
62650 if (oset)
62651 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62652 mm_segment_t old_fs = get_fs();
62653
62654 set_fs(KERNEL_DS);
62655 - ret = sys_old_getrlimit(resource, &r);
62656 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62657 set_fs(old_fs);
62658
62659 if (!ret) {
62660 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62661 mm_segment_t old_fs = get_fs();
62662
62663 set_fs(KERNEL_DS);
62664 - ret = sys_getrusage(who, (struct rusage __user *) &r);
62665 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62666 set_fs(old_fs);
62667
62668 if (ret)
62669 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62670 set_fs (KERNEL_DS);
62671 ret = sys_wait4(pid,
62672 (stat_addr ?
62673 - (unsigned int __user *) &status : NULL),
62674 - options, (struct rusage __user *) &r);
62675 + (unsigned int __force_user *) &status : NULL),
62676 + options, (struct rusage __force_user *) &r);
62677 set_fs (old_fs);
62678
62679 if (ret > 0) {
62680 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62681 memset(&info, 0, sizeof(info));
62682
62683 set_fs(KERNEL_DS);
62684 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62685 - uru ? (struct rusage __user *)&ru : NULL);
62686 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62687 + uru ? (struct rusage __force_user *)&ru : NULL);
62688 set_fs(old_fs);
62689
62690 if ((ret < 0) || (info.si_signo == 0))
62691 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62692 oldfs = get_fs();
62693 set_fs(KERNEL_DS);
62694 err = sys_timer_settime(timer_id, flags,
62695 - (struct itimerspec __user *) &newts,
62696 - (struct itimerspec __user *) &oldts);
62697 + (struct itimerspec __force_user *) &newts,
62698 + (struct itimerspec __force_user *) &oldts);
62699 set_fs(oldfs);
62700 if (!err && old && put_compat_itimerspec(old, &oldts))
62701 return -EFAULT;
62702 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62703 oldfs = get_fs();
62704 set_fs(KERNEL_DS);
62705 err = sys_timer_gettime(timer_id,
62706 - (struct itimerspec __user *) &ts);
62707 + (struct itimerspec __force_user *) &ts);
62708 set_fs(oldfs);
62709 if (!err && put_compat_itimerspec(setting, &ts))
62710 return -EFAULT;
62711 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62712 oldfs = get_fs();
62713 set_fs(KERNEL_DS);
62714 err = sys_clock_settime(which_clock,
62715 - (struct timespec __user *) &ts);
62716 + (struct timespec __force_user *) &ts);
62717 set_fs(oldfs);
62718 return err;
62719 }
62720 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62721 oldfs = get_fs();
62722 set_fs(KERNEL_DS);
62723 err = sys_clock_gettime(which_clock,
62724 - (struct timespec __user *) &ts);
62725 + (struct timespec __force_user *) &ts);
62726 set_fs(oldfs);
62727 if (!err && put_compat_timespec(&ts, tp))
62728 return -EFAULT;
62729 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62730
62731 oldfs = get_fs();
62732 set_fs(KERNEL_DS);
62733 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62734 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62735 set_fs(oldfs);
62736
62737 err = compat_put_timex(utp, &txc);
62738 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62739 oldfs = get_fs();
62740 set_fs(KERNEL_DS);
62741 err = sys_clock_getres(which_clock,
62742 - (struct timespec __user *) &ts);
62743 + (struct timespec __force_user *) &ts);
62744 set_fs(oldfs);
62745 if (!err && tp && put_compat_timespec(&ts, tp))
62746 return -EFAULT;
62747 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62748 long err;
62749 mm_segment_t oldfs;
62750 struct timespec tu;
62751 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62752 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62753
62754 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62755 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62756 oldfs = get_fs();
62757 set_fs(KERNEL_DS);
62758 err = clock_nanosleep_restart(restart);
62759 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62760 oldfs = get_fs();
62761 set_fs(KERNEL_DS);
62762 err = sys_clock_nanosleep(which_clock, flags,
62763 - (struct timespec __user *) &in,
62764 - (struct timespec __user *) &out);
62765 + (struct timespec __force_user *) &in,
62766 + (struct timespec __force_user *) &out);
62767 set_fs(oldfs);
62768
62769 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62770 diff --git a/kernel/configs.c b/kernel/configs.c
62771 index 42e8fa0..9e7406b 100644
62772 --- a/kernel/configs.c
62773 +++ b/kernel/configs.c
62774 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62775 struct proc_dir_entry *entry;
62776
62777 /* create the current config file */
62778 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62779 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62780 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62781 + &ikconfig_file_ops);
62782 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62783 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62784 + &ikconfig_file_ops);
62785 +#endif
62786 +#else
62787 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62788 &ikconfig_file_ops);
62789 +#endif
62790 +
62791 if (!entry)
62792 return -ENOMEM;
62793
62794 diff --git a/kernel/cred.c b/kernel/cred.c
62795 index 5791612..a3c04dc 100644
62796 --- a/kernel/cred.c
62797 +++ b/kernel/cred.c
62798 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62799 validate_creds(cred);
62800 put_cred(cred);
62801 }
62802 +
62803 +#ifdef CONFIG_GRKERNSEC_SETXID
62804 + cred = (struct cred *) tsk->delayed_cred;
62805 + if (cred) {
62806 + tsk->delayed_cred = NULL;
62807 + validate_creds(cred);
62808 + put_cred(cred);
62809 + }
62810 +#endif
62811 }
62812
62813 /**
62814 @@ -470,7 +479,7 @@ error_put:
62815 * Always returns 0 thus allowing this function to be tail-called at the end
62816 * of, say, sys_setgid().
62817 */
62818 -int commit_creds(struct cred *new)
62819 +static int __commit_creds(struct cred *new)
62820 {
62821 struct task_struct *task = current;
62822 const struct cred *old = task->real_cred;
62823 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
62824
62825 get_cred(new); /* we will require a ref for the subj creds too */
62826
62827 + gr_set_role_label(task, new->uid, new->gid);
62828 +
62829 /* dumpability changes */
62830 if (old->euid != new->euid ||
62831 old->egid != new->egid ||
62832 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
62833 put_cred(old);
62834 return 0;
62835 }
62836 +#ifdef CONFIG_GRKERNSEC_SETXID
62837 +extern int set_user(struct cred *new);
62838 +
62839 +void gr_delayed_cred_worker(void)
62840 +{
62841 + const struct cred *new = current->delayed_cred;
62842 + struct cred *ncred;
62843 +
62844 + current->delayed_cred = NULL;
62845 +
62846 + if (current_uid() && new != NULL) {
62847 + // from doing get_cred on it when queueing this
62848 + put_cred(new);
62849 + return;
62850 + } else if (new == NULL)
62851 + return;
62852 +
62853 + ncred = prepare_creds();
62854 + if (!ncred)
62855 + goto die;
62856 + // uids
62857 + ncred->uid = new->uid;
62858 + ncred->euid = new->euid;
62859 + ncred->suid = new->suid;
62860 + ncred->fsuid = new->fsuid;
62861 + // gids
62862 + ncred->gid = new->gid;
62863 + ncred->egid = new->egid;
62864 + ncred->sgid = new->sgid;
62865 + ncred->fsgid = new->fsgid;
62866 + // groups
62867 + if (set_groups(ncred, new->group_info) < 0) {
62868 + abort_creds(ncred);
62869 + goto die;
62870 + }
62871 + // caps
62872 + ncred->securebits = new->securebits;
62873 + ncred->cap_inheritable = new->cap_inheritable;
62874 + ncred->cap_permitted = new->cap_permitted;
62875 + ncred->cap_effective = new->cap_effective;
62876 + ncred->cap_bset = new->cap_bset;
62877 +
62878 + if (set_user(ncred)) {
62879 + abort_creds(ncred);
62880 + goto die;
62881 + }
62882 +
62883 + // from doing get_cred on it when queueing this
62884 + put_cred(new);
62885 +
62886 + __commit_creds(ncred);
62887 + return;
62888 +die:
62889 + // from doing get_cred on it when queueing this
62890 + put_cred(new);
62891 + do_group_exit(SIGKILL);
62892 +}
62893 +#endif
62894 +
62895 +int commit_creds(struct cred *new)
62896 +{
62897 +#ifdef CONFIG_GRKERNSEC_SETXID
62898 + struct task_struct *t;
62899 +
62900 + /* we won't get called with tasklist_lock held for writing
62901 + and interrupts disabled as the cred struct in that case is
62902 + init_cred
62903 + */
62904 + if (grsec_enable_setxid && !current_is_single_threaded() &&
62905 + !current_uid() && new->uid) {
62906 + rcu_read_lock();
62907 + read_lock(&tasklist_lock);
62908 + for (t = next_thread(current); t != current;
62909 + t = next_thread(t)) {
62910 + if (t->delayed_cred == NULL) {
62911 + t->delayed_cred = get_cred(new);
62912 + set_tsk_need_resched(t);
62913 + }
62914 + }
62915 + read_unlock(&tasklist_lock);
62916 + rcu_read_unlock();
62917 + }
62918 +#endif
62919 + return __commit_creds(new);
62920 +}
62921 +
62922 EXPORT_SYMBOL(commit_creds);
62923
62924 /**
62925 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
62926 index 0d7c087..01b8cef 100644
62927 --- a/kernel/debug/debug_core.c
62928 +++ b/kernel/debug/debug_core.c
62929 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
62930 */
62931 static atomic_t masters_in_kgdb;
62932 static atomic_t slaves_in_kgdb;
62933 -static atomic_t kgdb_break_tasklet_var;
62934 +static atomic_unchecked_t kgdb_break_tasklet_var;
62935 atomic_t kgdb_setting_breakpoint;
62936
62937 struct task_struct *kgdb_usethread;
62938 @@ -129,7 +129,7 @@ int kgdb_single_step;
62939 static pid_t kgdb_sstep_pid;
62940
62941 /* to keep track of the CPU which is doing the single stepping*/
62942 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62943 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62944
62945 /*
62946 * If you are debugging a problem where roundup (the collection of
62947 @@ -542,7 +542,7 @@ return_normal:
62948 * kernel will only try for the value of sstep_tries before
62949 * giving up and continuing on.
62950 */
62951 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62952 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62953 (kgdb_info[cpu].task &&
62954 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62955 atomic_set(&kgdb_active, -1);
62956 @@ -636,8 +636,8 @@ cpu_master_loop:
62957 }
62958
62959 kgdb_restore:
62960 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62961 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62962 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62963 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62964 if (kgdb_info[sstep_cpu].task)
62965 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62966 else
62967 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
62968 static void kgdb_tasklet_bpt(unsigned long ing)
62969 {
62970 kgdb_breakpoint();
62971 - atomic_set(&kgdb_break_tasklet_var, 0);
62972 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62973 }
62974
62975 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62976
62977 void kgdb_schedule_breakpoint(void)
62978 {
62979 - if (atomic_read(&kgdb_break_tasklet_var) ||
62980 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62981 atomic_read(&kgdb_active) != -1 ||
62982 atomic_read(&kgdb_setting_breakpoint))
62983 return;
62984 - atomic_inc(&kgdb_break_tasklet_var);
62985 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
62986 tasklet_schedule(&kgdb_tasklet_breakpoint);
62987 }
62988 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62989 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
62990 index 63786e7..0780cac 100644
62991 --- a/kernel/debug/kdb/kdb_main.c
62992 +++ b/kernel/debug/kdb/kdb_main.c
62993 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
62994 list_for_each_entry(mod, kdb_modules, list) {
62995
62996 kdb_printf("%-20s%8u 0x%p ", mod->name,
62997 - mod->core_size, (void *)mod);
62998 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
62999 #ifdef CONFIG_MODULE_UNLOAD
63000 kdb_printf("%4d ", module_refcount(mod));
63001 #endif
63002 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63003 kdb_printf(" (Loading)");
63004 else
63005 kdb_printf(" (Live)");
63006 - kdb_printf(" 0x%p", mod->module_core);
63007 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63008
63009 #ifdef CONFIG_MODULE_UNLOAD
63010 {
63011 diff --git a/kernel/events/core.c b/kernel/events/core.c
63012 index 58690af..d903d75 100644
63013 --- a/kernel/events/core.c
63014 +++ b/kernel/events/core.c
63015 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63016 return 0;
63017 }
63018
63019 -static atomic64_t perf_event_id;
63020 +static atomic64_unchecked_t perf_event_id;
63021
63022 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63023 enum event_type_t event_type);
63024 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63025
63026 static inline u64 perf_event_count(struct perf_event *event)
63027 {
63028 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63029 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63030 }
63031
63032 static u64 perf_event_read(struct perf_event *event)
63033 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63034 mutex_lock(&event->child_mutex);
63035 total += perf_event_read(event);
63036 *enabled += event->total_time_enabled +
63037 - atomic64_read(&event->child_total_time_enabled);
63038 + atomic64_read_unchecked(&event->child_total_time_enabled);
63039 *running += event->total_time_running +
63040 - atomic64_read(&event->child_total_time_running);
63041 + atomic64_read_unchecked(&event->child_total_time_running);
63042
63043 list_for_each_entry(child, &event->child_list, child_list) {
63044 total += perf_event_read(child);
63045 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63046 userpg->offset -= local64_read(&event->hw.prev_count);
63047
63048 userpg->time_enabled = enabled +
63049 - atomic64_read(&event->child_total_time_enabled);
63050 + atomic64_read_unchecked(&event->child_total_time_enabled);
63051
63052 userpg->time_running = running +
63053 - atomic64_read(&event->child_total_time_running);
63054 + atomic64_read_unchecked(&event->child_total_time_running);
63055
63056 barrier();
63057 ++userpg->lock;
63058 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63059 values[n++] = perf_event_count(event);
63060 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63061 values[n++] = enabled +
63062 - atomic64_read(&event->child_total_time_enabled);
63063 + atomic64_read_unchecked(&event->child_total_time_enabled);
63064 }
63065 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63066 values[n++] = running +
63067 - atomic64_read(&event->child_total_time_running);
63068 + atomic64_read_unchecked(&event->child_total_time_running);
63069 }
63070 if (read_format & PERF_FORMAT_ID)
63071 values[n++] = primary_event_id(event);
63072 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63073 * need to add enough zero bytes after the string to handle
63074 * the 64bit alignment we do later.
63075 */
63076 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63077 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63078 if (!buf) {
63079 name = strncpy(tmp, "//enomem", sizeof(tmp));
63080 goto got_name;
63081 }
63082 - name = d_path(&file->f_path, buf, PATH_MAX);
63083 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63084 if (IS_ERR(name)) {
63085 name = strncpy(tmp, "//toolong", sizeof(tmp));
63086 goto got_name;
63087 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63088 event->parent = parent_event;
63089
63090 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63091 - event->id = atomic64_inc_return(&perf_event_id);
63092 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63093
63094 event->state = PERF_EVENT_STATE_INACTIVE;
63095
63096 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63097 /*
63098 * Add back the child's count to the parent's count:
63099 */
63100 - atomic64_add(child_val, &parent_event->child_count);
63101 - atomic64_add(child_event->total_time_enabled,
63102 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63103 + atomic64_add_unchecked(child_event->total_time_enabled,
63104 &parent_event->child_total_time_enabled);
63105 - atomic64_add(child_event->total_time_running,
63106 + atomic64_add_unchecked(child_event->total_time_running,
63107 &parent_event->child_total_time_running);
63108
63109 /*
63110 diff --git a/kernel/exit.c b/kernel/exit.c
63111 index e6e01b9..619f837 100644
63112 --- a/kernel/exit.c
63113 +++ b/kernel/exit.c
63114 @@ -57,6 +57,10 @@
63115 #include <asm/pgtable.h>
63116 #include <asm/mmu_context.h>
63117
63118 +#ifdef CONFIG_GRKERNSEC
63119 +extern rwlock_t grsec_exec_file_lock;
63120 +#endif
63121 +
63122 static void exit_mm(struct task_struct * tsk);
63123
63124 static void __unhash_process(struct task_struct *p, bool group_dead)
63125 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63126 struct task_struct *leader;
63127 int zap_leader;
63128 repeat:
63129 +#ifdef CONFIG_NET
63130 + gr_del_task_from_ip_table(p);
63131 +#endif
63132 +
63133 /* don't need to get the RCU readlock here - the process is dead and
63134 * can't be modifying its own credentials. But shut RCU-lockdep up */
63135 rcu_read_lock();
63136 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63137 * know it'll be handled, so that they don't get converted to
63138 * SIGKILL or just silently dropped.
63139 */
63140 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63141 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63142 recalc_sigpending();
63143 spin_unlock_irq(&current->sighand->siglock);
63144 return 0;
63145 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63146 vsnprintf(current->comm, sizeof(current->comm), name, args);
63147 va_end(args);
63148
63149 +#ifdef CONFIG_GRKERNSEC
63150 + write_lock(&grsec_exec_file_lock);
63151 + if (current->exec_file) {
63152 + fput(current->exec_file);
63153 + current->exec_file = NULL;
63154 + }
63155 + write_unlock(&grsec_exec_file_lock);
63156 +#endif
63157 +
63158 + gr_set_kernel_label(current);
63159 +
63160 /*
63161 * If we were started as result of loading a module, close all of the
63162 * user space pages. We don't need them, and if we didn't close them
63163 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63164 struct task_struct *tsk = current;
63165 int group_dead;
63166
63167 + set_fs(USER_DS);
63168 +
63169 profile_task_exit(tsk);
63170
63171 WARN_ON(blk_needs_flush_plug(tsk));
63172 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63173 * mm_release()->clear_child_tid() from writing to a user-controlled
63174 * kernel address.
63175 */
63176 - set_fs(USER_DS);
63177
63178 ptrace_event(PTRACE_EVENT_EXIT, code);
63179
63180 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63181 tsk->exit_code = code;
63182 taskstats_exit(tsk, group_dead);
63183
63184 + gr_acl_handle_psacct(tsk, code);
63185 + gr_acl_handle_exit();
63186 +
63187 exit_mm(tsk);
63188
63189 if (group_dead)
63190 diff --git a/kernel/fork.c b/kernel/fork.c
63191 index da4a6a1..0973380 100644
63192 --- a/kernel/fork.c
63193 +++ b/kernel/fork.c
63194 @@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63195 *stackend = STACK_END_MAGIC; /* for overflow detection */
63196
63197 #ifdef CONFIG_CC_STACKPROTECTOR
63198 - tsk->stack_canary = get_random_int();
63199 + tsk->stack_canary = pax_get_random_long();
63200 #endif
63201
63202 /*
63203 @@ -304,13 +304,77 @@ out:
63204 }
63205
63206 #ifdef CONFIG_MMU
63207 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63208 +{
63209 + struct vm_area_struct *tmp;
63210 + unsigned long charge;
63211 + struct mempolicy *pol;
63212 + struct file *file;
63213 +
63214 + charge = 0;
63215 + if (mpnt->vm_flags & VM_ACCOUNT) {
63216 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63217 + if (security_vm_enough_memory(len))
63218 + goto fail_nomem;
63219 + charge = len;
63220 + }
63221 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63222 + if (!tmp)
63223 + goto fail_nomem;
63224 + *tmp = *mpnt;
63225 + tmp->vm_mm = mm;
63226 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63227 + pol = mpol_dup(vma_policy(mpnt));
63228 + if (IS_ERR(pol))
63229 + goto fail_nomem_policy;
63230 + vma_set_policy(tmp, pol);
63231 + if (anon_vma_fork(tmp, mpnt))
63232 + goto fail_nomem_anon_vma_fork;
63233 + tmp->vm_flags &= ~VM_LOCKED;
63234 + tmp->vm_next = tmp->vm_prev = NULL;
63235 + tmp->vm_mirror = NULL;
63236 + file = tmp->vm_file;
63237 + if (file) {
63238 + struct inode *inode = file->f_path.dentry->d_inode;
63239 + struct address_space *mapping = file->f_mapping;
63240 +
63241 + get_file(file);
63242 + if (tmp->vm_flags & VM_DENYWRITE)
63243 + atomic_dec(&inode->i_writecount);
63244 + mutex_lock(&mapping->i_mmap_mutex);
63245 + if (tmp->vm_flags & VM_SHARED)
63246 + mapping->i_mmap_writable++;
63247 + flush_dcache_mmap_lock(mapping);
63248 + /* insert tmp into the share list, just after mpnt */
63249 + vma_prio_tree_add(tmp, mpnt);
63250 + flush_dcache_mmap_unlock(mapping);
63251 + mutex_unlock(&mapping->i_mmap_mutex);
63252 + }
63253 +
63254 + /*
63255 + * Clear hugetlb-related page reserves for children. This only
63256 + * affects MAP_PRIVATE mappings. Faults generated by the child
63257 + * are not guaranteed to succeed, even if read-only
63258 + */
63259 + if (is_vm_hugetlb_page(tmp))
63260 + reset_vma_resv_huge_pages(tmp);
63261 +
63262 + return tmp;
63263 +
63264 +fail_nomem_anon_vma_fork:
63265 + mpol_put(pol);
63266 +fail_nomem_policy:
63267 + kmem_cache_free(vm_area_cachep, tmp);
63268 +fail_nomem:
63269 + vm_unacct_memory(charge);
63270 + return NULL;
63271 +}
63272 +
63273 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63274 {
63275 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63276 struct rb_node **rb_link, *rb_parent;
63277 int retval;
63278 - unsigned long charge;
63279 - struct mempolicy *pol;
63280
63281 down_write(&oldmm->mmap_sem);
63282 flush_cache_dup_mm(oldmm);
63283 @@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63284 mm->locked_vm = 0;
63285 mm->mmap = NULL;
63286 mm->mmap_cache = NULL;
63287 - mm->free_area_cache = oldmm->mmap_base;
63288 - mm->cached_hole_size = ~0UL;
63289 + mm->free_area_cache = oldmm->free_area_cache;
63290 + mm->cached_hole_size = oldmm->cached_hole_size;
63291 mm->map_count = 0;
63292 cpumask_clear(mm_cpumask(mm));
63293 mm->mm_rb = RB_ROOT;
63294 @@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63295
63296 prev = NULL;
63297 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63298 - struct file *file;
63299 -
63300 if (mpnt->vm_flags & VM_DONTCOPY) {
63301 long pages = vma_pages(mpnt);
63302 mm->total_vm -= pages;
63303 @@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63304 -pages);
63305 continue;
63306 }
63307 - charge = 0;
63308 - if (mpnt->vm_flags & VM_ACCOUNT) {
63309 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63310 - if (security_vm_enough_memory(len))
63311 - goto fail_nomem;
63312 - charge = len;
63313 + tmp = dup_vma(mm, mpnt);
63314 + if (!tmp) {
63315 + retval = -ENOMEM;
63316 + goto out;
63317 }
63318 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63319 - if (!tmp)
63320 - goto fail_nomem;
63321 - *tmp = *mpnt;
63322 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63323 - pol = mpol_dup(vma_policy(mpnt));
63324 - retval = PTR_ERR(pol);
63325 - if (IS_ERR(pol))
63326 - goto fail_nomem_policy;
63327 - vma_set_policy(tmp, pol);
63328 - tmp->vm_mm = mm;
63329 - if (anon_vma_fork(tmp, mpnt))
63330 - goto fail_nomem_anon_vma_fork;
63331 - tmp->vm_flags &= ~VM_LOCKED;
63332 - tmp->vm_next = tmp->vm_prev = NULL;
63333 - file = tmp->vm_file;
63334 - if (file) {
63335 - struct inode *inode = file->f_path.dentry->d_inode;
63336 - struct address_space *mapping = file->f_mapping;
63337 -
63338 - get_file(file);
63339 - if (tmp->vm_flags & VM_DENYWRITE)
63340 - atomic_dec(&inode->i_writecount);
63341 - mutex_lock(&mapping->i_mmap_mutex);
63342 - if (tmp->vm_flags & VM_SHARED)
63343 - mapping->i_mmap_writable++;
63344 - flush_dcache_mmap_lock(mapping);
63345 - /* insert tmp into the share list, just after mpnt */
63346 - vma_prio_tree_add(tmp, mpnt);
63347 - flush_dcache_mmap_unlock(mapping);
63348 - mutex_unlock(&mapping->i_mmap_mutex);
63349 - }
63350 -
63351 - /*
63352 - * Clear hugetlb-related page reserves for children. This only
63353 - * affects MAP_PRIVATE mappings. Faults generated by the child
63354 - * are not guaranteed to succeed, even if read-only
63355 - */
63356 - if (is_vm_hugetlb_page(tmp))
63357 - reset_vma_resv_huge_pages(tmp);
63358
63359 /*
63360 * Link in the new vma and copy the page table entries.
63361 @@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63362 if (retval)
63363 goto out;
63364 }
63365 +
63366 +#ifdef CONFIG_PAX_SEGMEXEC
63367 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63368 + struct vm_area_struct *mpnt_m;
63369 +
63370 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63371 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63372 +
63373 + if (!mpnt->vm_mirror)
63374 + continue;
63375 +
63376 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63377 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63378 + mpnt->vm_mirror = mpnt_m;
63379 + } else {
63380 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63381 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63382 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63383 + mpnt->vm_mirror->vm_mirror = mpnt;
63384 + }
63385 + }
63386 + BUG_ON(mpnt_m);
63387 + }
63388 +#endif
63389 +
63390 /* a new mm has just been created */
63391 arch_dup_mmap(oldmm, mm);
63392 retval = 0;
63393 @@ -425,14 +470,6 @@ out:
63394 flush_tlb_mm(oldmm);
63395 up_write(&oldmm->mmap_sem);
63396 return retval;
63397 -fail_nomem_anon_vma_fork:
63398 - mpol_put(pol);
63399 -fail_nomem_policy:
63400 - kmem_cache_free(vm_area_cachep, tmp);
63401 -fail_nomem:
63402 - retval = -ENOMEM;
63403 - vm_unacct_memory(charge);
63404 - goto out;
63405 }
63406
63407 static inline int mm_alloc_pgd(struct mm_struct *mm)
63408 @@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63409 }
63410 EXPORT_SYMBOL_GPL(get_task_mm);
63411
63412 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63413 +{
63414 + struct mm_struct *mm;
63415 + int err;
63416 +
63417 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63418 + if (err)
63419 + return ERR_PTR(err);
63420 +
63421 + mm = get_task_mm(task);
63422 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63423 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63424 + mmput(mm);
63425 + mm = ERR_PTR(-EACCES);
63426 + }
63427 + mutex_unlock(&task->signal->cred_guard_mutex);
63428 +
63429 + return mm;
63430 +}
63431 +
63432 /* Please note the differences between mmput and mm_release.
63433 * mmput is called whenever we stop holding onto a mm_struct,
63434 * error success whatever.
63435 @@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63436 spin_unlock(&fs->lock);
63437 return -EAGAIN;
63438 }
63439 - fs->users++;
63440 + atomic_inc(&fs->users);
63441 spin_unlock(&fs->lock);
63442 return 0;
63443 }
63444 tsk->fs = copy_fs_struct(fs);
63445 if (!tsk->fs)
63446 return -ENOMEM;
63447 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63448 return 0;
63449 }
63450
63451 @@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63452 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63453 #endif
63454 retval = -EAGAIN;
63455 +
63456 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63457 +
63458 if (atomic_read(&p->real_cred->user->processes) >=
63459 task_rlimit(p, RLIMIT_NPROC)) {
63460 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63461 @@ -1256,6 +1317,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63462 if (clone_flags & CLONE_THREAD)
63463 p->tgid = current->tgid;
63464
63465 + gr_copy_label(p);
63466 +
63467 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63468 /*
63469 * Clear TID on mm_release()?
63470 @@ -1418,6 +1481,8 @@ bad_fork_cleanup_count:
63471 bad_fork_free:
63472 free_task(p);
63473 fork_out:
63474 + gr_log_forkfail(retval);
63475 +
63476 return ERR_PTR(retval);
63477 }
63478
63479 @@ -1518,6 +1583,8 @@ long do_fork(unsigned long clone_flags,
63480 if (clone_flags & CLONE_PARENT_SETTID)
63481 put_user(nr, parent_tidptr);
63482
63483 + gr_handle_brute_check();
63484 +
63485 if (clone_flags & CLONE_VFORK) {
63486 p->vfork_done = &vfork;
63487 init_completion(&vfork);
63488 @@ -1627,7 +1694,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63489 return 0;
63490
63491 /* don't need lock here; in the worst case we'll do useless copy */
63492 - if (fs->users == 1)
63493 + if (atomic_read(&fs->users) == 1)
63494 return 0;
63495
63496 *new_fsp = copy_fs_struct(fs);
63497 @@ -1716,7 +1783,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63498 fs = current->fs;
63499 spin_lock(&fs->lock);
63500 current->fs = new_fs;
63501 - if (--fs->users)
63502 + gr_set_chroot_entries(current, &current->fs->root);
63503 + if (atomic_dec_return(&fs->users))
63504 new_fs = NULL;
63505 else
63506 new_fs = fs;
63507 diff --git a/kernel/futex.c b/kernel/futex.c
63508 index 1614be2..37abc7e 100644
63509 --- a/kernel/futex.c
63510 +++ b/kernel/futex.c
63511 @@ -54,6 +54,7 @@
63512 #include <linux/mount.h>
63513 #include <linux/pagemap.h>
63514 #include <linux/syscalls.h>
63515 +#include <linux/ptrace.h>
63516 #include <linux/signal.h>
63517 #include <linux/export.h>
63518 #include <linux/magic.h>
63519 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63520 struct page *page, *page_head;
63521 int err, ro = 0;
63522
63523 +#ifdef CONFIG_PAX_SEGMEXEC
63524 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63525 + return -EFAULT;
63526 +#endif
63527 +
63528 /*
63529 * The futex address must be "naturally" aligned.
63530 */
63531 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63532 if (!p)
63533 goto err_unlock;
63534 ret = -EPERM;
63535 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63536 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63537 + goto err_unlock;
63538 +#endif
63539 pcred = __task_cred(p);
63540 /* If victim is in different user_ns, then uids are not
63541 comparable, so we must have CAP_SYS_PTRACE */
63542 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63543 {
63544 u32 curval;
63545 int i;
63546 + mm_segment_t oldfs;
63547
63548 /*
63549 * This will fail and we want it. Some arch implementations do
63550 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63551 * implementation, the non-functional ones will return
63552 * -ENOSYS.
63553 */
63554 + oldfs = get_fs();
63555 + set_fs(USER_DS);
63556 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63557 futex_cmpxchg_enabled = 1;
63558 + set_fs(oldfs);
63559
63560 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63561 plist_head_init(&futex_queues[i].chain);
63562 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63563 index 5f9e689..582d46d 100644
63564 --- a/kernel/futex_compat.c
63565 +++ b/kernel/futex_compat.c
63566 @@ -10,6 +10,7 @@
63567 #include <linux/compat.h>
63568 #include <linux/nsproxy.h>
63569 #include <linux/futex.h>
63570 +#include <linux/ptrace.h>
63571
63572 #include <asm/uaccess.h>
63573
63574 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63575 {
63576 struct compat_robust_list_head __user *head;
63577 unsigned long ret;
63578 - const struct cred *cred = current_cred(), *pcred;
63579 + const struct cred *cred = current_cred();
63580 + const struct cred *pcred;
63581
63582 if (!futex_cmpxchg_enabled)
63583 return -ENOSYS;
63584 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63585 if (!p)
63586 goto err_unlock;
63587 ret = -EPERM;
63588 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63589 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63590 + goto err_unlock;
63591 +#endif
63592 pcred = __task_cred(p);
63593 /* If victim is in different user_ns, then uids are not
63594 comparable, so we must have CAP_SYS_PTRACE */
63595 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63596 index 9b22d03..6295b62 100644
63597 --- a/kernel/gcov/base.c
63598 +++ b/kernel/gcov/base.c
63599 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63600 }
63601
63602 #ifdef CONFIG_MODULES
63603 -static inline int within(void *addr, void *start, unsigned long size)
63604 -{
63605 - return ((addr >= start) && (addr < start + size));
63606 -}
63607 -
63608 /* Update list and generate events when modules are unloaded. */
63609 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63610 void *data)
63611 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63612 prev = NULL;
63613 /* Remove entries located in module from linked list. */
63614 for (info = gcov_info_head; info; info = info->next) {
63615 - if (within(info, mod->module_core, mod->core_size)) {
63616 + if (within_module_core_rw((unsigned long)info, mod)) {
63617 if (prev)
63618 prev->next = info->next;
63619 else
63620 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63621 index ae34bf5..4e2f3d0 100644
63622 --- a/kernel/hrtimer.c
63623 +++ b/kernel/hrtimer.c
63624 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63625 local_irq_restore(flags);
63626 }
63627
63628 -static void run_hrtimer_softirq(struct softirq_action *h)
63629 +static void run_hrtimer_softirq(void)
63630 {
63631 hrtimer_peek_ahead_timers();
63632 }
63633 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63634 index 66ff710..05a5128 100644
63635 --- a/kernel/jump_label.c
63636 +++ b/kernel/jump_label.c
63637 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63638
63639 size = (((unsigned long)stop - (unsigned long)start)
63640 / sizeof(struct jump_entry));
63641 + pax_open_kernel();
63642 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63643 + pax_close_kernel();
63644 }
63645
63646 static void jump_label_update(struct jump_label_key *key, int enable);
63647 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63648 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63649 struct jump_entry *iter;
63650
63651 + pax_open_kernel();
63652 for (iter = iter_start; iter < iter_stop; iter++) {
63653 if (within_module_init(iter->code, mod))
63654 iter->code = 0;
63655 }
63656 + pax_close_kernel();
63657 }
63658
63659 static int
63660 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63661 index 079f1d3..a407562 100644
63662 --- a/kernel/kallsyms.c
63663 +++ b/kernel/kallsyms.c
63664 @@ -11,6 +11,9 @@
63665 * Changed the compression method from stem compression to "table lookup"
63666 * compression (see scripts/kallsyms.c for a more complete description)
63667 */
63668 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63669 +#define __INCLUDED_BY_HIDESYM 1
63670 +#endif
63671 #include <linux/kallsyms.h>
63672 #include <linux/module.h>
63673 #include <linux/init.h>
63674 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63675
63676 static inline int is_kernel_inittext(unsigned long addr)
63677 {
63678 + if (system_state != SYSTEM_BOOTING)
63679 + return 0;
63680 +
63681 if (addr >= (unsigned long)_sinittext
63682 && addr <= (unsigned long)_einittext)
63683 return 1;
63684 return 0;
63685 }
63686
63687 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63688 +#ifdef CONFIG_MODULES
63689 +static inline int is_module_text(unsigned long addr)
63690 +{
63691 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63692 + return 1;
63693 +
63694 + addr = ktla_ktva(addr);
63695 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63696 +}
63697 +#else
63698 +static inline int is_module_text(unsigned long addr)
63699 +{
63700 + return 0;
63701 +}
63702 +#endif
63703 +#endif
63704 +
63705 static inline int is_kernel_text(unsigned long addr)
63706 {
63707 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63708 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63709
63710 static inline int is_kernel(unsigned long addr)
63711 {
63712 +
63713 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63714 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63715 + return 1;
63716 +
63717 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63718 +#else
63719 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63720 +#endif
63721 +
63722 return 1;
63723 return in_gate_area_no_mm(addr);
63724 }
63725
63726 static int is_ksym_addr(unsigned long addr)
63727 {
63728 +
63729 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63730 + if (is_module_text(addr))
63731 + return 0;
63732 +#endif
63733 +
63734 if (all_var)
63735 return is_kernel(addr);
63736
63737 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63738
63739 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63740 {
63741 - iter->name[0] = '\0';
63742 iter->nameoff = get_symbol_offset(new_pos);
63743 iter->pos = new_pos;
63744 }
63745 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63746 {
63747 struct kallsym_iter *iter = m->private;
63748
63749 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63750 + if (current_uid())
63751 + return 0;
63752 +#endif
63753 +
63754 /* Some debugging symbols have no name. Ignore them. */
63755 if (!iter->name[0])
63756 return 0;
63757 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63758 struct kallsym_iter *iter;
63759 int ret;
63760
63761 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63762 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63763 if (!iter)
63764 return -ENOMEM;
63765 reset_iter(iter, 0);
63766 diff --git a/kernel/kexec.c b/kernel/kexec.c
63767 index dc7bc08..4601964 100644
63768 --- a/kernel/kexec.c
63769 +++ b/kernel/kexec.c
63770 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63771 unsigned long flags)
63772 {
63773 struct compat_kexec_segment in;
63774 - struct kexec_segment out, __user *ksegments;
63775 + struct kexec_segment out;
63776 + struct kexec_segment __user *ksegments;
63777 unsigned long i, result;
63778
63779 /* Don't allow clients that don't understand the native
63780 diff --git a/kernel/kmod.c b/kernel/kmod.c
63781 index a4bea97..7a1ae9a 100644
63782 --- a/kernel/kmod.c
63783 +++ b/kernel/kmod.c
63784 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63785 * If module auto-loading support is disabled then this function
63786 * becomes a no-operation.
63787 */
63788 -int __request_module(bool wait, const char *fmt, ...)
63789 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63790 {
63791 - va_list args;
63792 char module_name[MODULE_NAME_LEN];
63793 unsigned int max_modprobes;
63794 int ret;
63795 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63796 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63797 static char *envp[] = { "HOME=/",
63798 "TERM=linux",
63799 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63800 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63801 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63802 static int kmod_loop_msg;
63803
63804 - va_start(args, fmt);
63805 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63806 - va_end(args);
63807 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63808 if (ret >= MODULE_NAME_LEN)
63809 return -ENAMETOOLONG;
63810
63811 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63812 if (ret)
63813 return ret;
63814
63815 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63816 + if (!current_uid()) {
63817 + /* hack to workaround consolekit/udisks stupidity */
63818 + read_lock(&tasklist_lock);
63819 + if (!strcmp(current->comm, "mount") &&
63820 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63821 + read_unlock(&tasklist_lock);
63822 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63823 + return -EPERM;
63824 + }
63825 + read_unlock(&tasklist_lock);
63826 + }
63827 +#endif
63828 +
63829 /* If modprobe needs a service that is in a module, we get a recursive
63830 * loop. Limit the number of running kmod threads to max_threads/2 or
63831 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63832 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
63833 atomic_dec(&kmod_concurrent);
63834 return ret;
63835 }
63836 +
63837 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63838 +{
63839 + va_list args;
63840 + int ret;
63841 +
63842 + va_start(args, fmt);
63843 + ret = ____request_module(wait, module_param, fmt, args);
63844 + va_end(args);
63845 +
63846 + return ret;
63847 +}
63848 +
63849 +int __request_module(bool wait, const char *fmt, ...)
63850 +{
63851 + va_list args;
63852 + int ret;
63853 +
63854 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63855 + if (current_uid()) {
63856 + char module_param[MODULE_NAME_LEN];
63857 +
63858 + memset(module_param, 0, sizeof(module_param));
63859 +
63860 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63861 +
63862 + va_start(args, fmt);
63863 + ret = ____request_module(wait, module_param, fmt, args);
63864 + va_end(args);
63865 +
63866 + return ret;
63867 + }
63868 +#endif
63869 +
63870 + va_start(args, fmt);
63871 + ret = ____request_module(wait, NULL, fmt, args);
63872 + va_end(args);
63873 +
63874 + return ret;
63875 +}
63876 +
63877 EXPORT_SYMBOL(__request_module);
63878 #endif /* CONFIG_MODULES */
63879
63880 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63881 *
63882 * Thus the __user pointer cast is valid here.
63883 */
63884 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
63885 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63886
63887 /*
63888 * If ret is 0, either ____call_usermodehelper failed and the
63889 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
63890 index 52fd049..3def6a8 100644
63891 --- a/kernel/kprobes.c
63892 +++ b/kernel/kprobes.c
63893 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
63894 * kernel image and loaded module images reside. This is required
63895 * so x86_64 can correctly handle the %rip-relative fixups.
63896 */
63897 - kip->insns = module_alloc(PAGE_SIZE);
63898 + kip->insns = module_alloc_exec(PAGE_SIZE);
63899 if (!kip->insns) {
63900 kfree(kip);
63901 return NULL;
63902 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
63903 */
63904 if (!list_is_singular(&kip->list)) {
63905 list_del(&kip->list);
63906 - module_free(NULL, kip->insns);
63907 + module_free_exec(NULL, kip->insns);
63908 kfree(kip);
63909 }
63910 return 1;
63911 @@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63912 {
63913 int i, err = 0;
63914 unsigned long offset = 0, size = 0;
63915 - char *modname, namebuf[128];
63916 + char *modname, namebuf[KSYM_NAME_LEN];
63917 const char *symbol_name;
63918 void *addr;
63919 struct kprobe_blackpoint *kb;
63920 @@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
63921 const char *sym = NULL;
63922 unsigned int i = *(loff_t *) v;
63923 unsigned long offset = 0;
63924 - char *modname, namebuf[128];
63925 + char *modname, namebuf[KSYM_NAME_LEN];
63926
63927 head = &kprobe_table[i];
63928 preempt_disable();
63929 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
63930 index b2e08c9..01d8049 100644
63931 --- a/kernel/lockdep.c
63932 +++ b/kernel/lockdep.c
63933 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
63934 end = (unsigned long) &_end,
63935 addr = (unsigned long) obj;
63936
63937 +#ifdef CONFIG_PAX_KERNEXEC
63938 + start = ktla_ktva(start);
63939 +#endif
63940 +
63941 /*
63942 * static variable?
63943 */
63944 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
63945 if (!static_obj(lock->key)) {
63946 debug_locks_off();
63947 printk("INFO: trying to register non-static key.\n");
63948 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63949 printk("the code is fine but needs lockdep annotation.\n");
63950 printk("turning off the locking correctness validator.\n");
63951 dump_stack();
63952 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
63953 if (!class)
63954 return 0;
63955 }
63956 - atomic_inc((atomic_t *)&class->ops);
63957 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63958 if (very_verbose(class)) {
63959 printk("\nacquire class [%p] %s", class->key, class->name);
63960 if (class->name_version > 1)
63961 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
63962 index 91c32a0..b2c71c5 100644
63963 --- a/kernel/lockdep_proc.c
63964 +++ b/kernel/lockdep_proc.c
63965 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
63966
63967 static void print_name(struct seq_file *m, struct lock_class *class)
63968 {
63969 - char str[128];
63970 + char str[KSYM_NAME_LEN];
63971 const char *name = class->name;
63972
63973 if (!name) {
63974 diff --git a/kernel/module.c b/kernel/module.c
63975 index 178333c..04e3408 100644
63976 --- a/kernel/module.c
63977 +++ b/kernel/module.c
63978 @@ -58,6 +58,7 @@
63979 #include <linux/jump_label.h>
63980 #include <linux/pfn.h>
63981 #include <linux/bsearch.h>
63982 +#include <linux/grsecurity.h>
63983
63984 #define CREATE_TRACE_POINTS
63985 #include <trace/events/module.h>
63986 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63987
63988 /* Bounds of module allocation, for speeding __module_address.
63989 * Protected by module_mutex. */
63990 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63991 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63992 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63993
63994 int register_module_notifier(struct notifier_block * nb)
63995 {
63996 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
63997 return true;
63998
63999 list_for_each_entry_rcu(mod, &modules, list) {
64000 - struct symsearch arr[] = {
64001 + struct symsearch modarr[] = {
64002 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64003 NOT_GPL_ONLY, false },
64004 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64005 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64006 #endif
64007 };
64008
64009 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64010 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64011 return true;
64012 }
64013 return false;
64014 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64015 static int percpu_modalloc(struct module *mod,
64016 unsigned long size, unsigned long align)
64017 {
64018 - if (align > PAGE_SIZE) {
64019 + if (align-1 >= PAGE_SIZE) {
64020 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64021 mod->name, align, PAGE_SIZE);
64022 align = PAGE_SIZE;
64023 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64024 */
64025 #ifdef CONFIG_SYSFS
64026
64027 -#ifdef CONFIG_KALLSYMS
64028 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64029 static inline bool sect_empty(const Elf_Shdr *sect)
64030 {
64031 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64032 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64033
64034 static void unset_module_core_ro_nx(struct module *mod)
64035 {
64036 - set_page_attributes(mod->module_core + mod->core_text_size,
64037 - mod->module_core + mod->core_size,
64038 + set_page_attributes(mod->module_core_rw,
64039 + mod->module_core_rw + mod->core_size_rw,
64040 set_memory_x);
64041 - set_page_attributes(mod->module_core,
64042 - mod->module_core + mod->core_ro_size,
64043 + set_page_attributes(mod->module_core_rx,
64044 + mod->module_core_rx + mod->core_size_rx,
64045 set_memory_rw);
64046 }
64047
64048 static void unset_module_init_ro_nx(struct module *mod)
64049 {
64050 - set_page_attributes(mod->module_init + mod->init_text_size,
64051 - mod->module_init + mod->init_size,
64052 + set_page_attributes(mod->module_init_rw,
64053 + mod->module_init_rw + mod->init_size_rw,
64054 set_memory_x);
64055 - set_page_attributes(mod->module_init,
64056 - mod->module_init + mod->init_ro_size,
64057 + set_page_attributes(mod->module_init_rx,
64058 + mod->module_init_rx + mod->init_size_rx,
64059 set_memory_rw);
64060 }
64061
64062 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64063
64064 mutex_lock(&module_mutex);
64065 list_for_each_entry_rcu(mod, &modules, list) {
64066 - if ((mod->module_core) && (mod->core_text_size)) {
64067 - set_page_attributes(mod->module_core,
64068 - mod->module_core + mod->core_text_size,
64069 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64070 + set_page_attributes(mod->module_core_rx,
64071 + mod->module_core_rx + mod->core_size_rx,
64072 set_memory_rw);
64073 }
64074 - if ((mod->module_init) && (mod->init_text_size)) {
64075 - set_page_attributes(mod->module_init,
64076 - mod->module_init + mod->init_text_size,
64077 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64078 + set_page_attributes(mod->module_init_rx,
64079 + mod->module_init_rx + mod->init_size_rx,
64080 set_memory_rw);
64081 }
64082 }
64083 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64084
64085 mutex_lock(&module_mutex);
64086 list_for_each_entry_rcu(mod, &modules, list) {
64087 - if ((mod->module_core) && (mod->core_text_size)) {
64088 - set_page_attributes(mod->module_core,
64089 - mod->module_core + mod->core_text_size,
64090 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64091 + set_page_attributes(mod->module_core_rx,
64092 + mod->module_core_rx + mod->core_size_rx,
64093 set_memory_ro);
64094 }
64095 - if ((mod->module_init) && (mod->init_text_size)) {
64096 - set_page_attributes(mod->module_init,
64097 - mod->module_init + mod->init_text_size,
64098 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64099 + set_page_attributes(mod->module_init_rx,
64100 + mod->module_init_rx + mod->init_size_rx,
64101 set_memory_ro);
64102 }
64103 }
64104 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64105
64106 /* This may be NULL, but that's OK */
64107 unset_module_init_ro_nx(mod);
64108 - module_free(mod, mod->module_init);
64109 + module_free(mod, mod->module_init_rw);
64110 + module_free_exec(mod, mod->module_init_rx);
64111 kfree(mod->args);
64112 percpu_modfree(mod);
64113
64114 /* Free lock-classes: */
64115 - lockdep_free_key_range(mod->module_core, mod->core_size);
64116 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64117 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64118
64119 /* Finally, free the core (containing the module structure) */
64120 unset_module_core_ro_nx(mod);
64121 - module_free(mod, mod->module_core);
64122 + module_free_exec(mod, mod->module_core_rx);
64123 + module_free(mod, mod->module_core_rw);
64124
64125 #ifdef CONFIG_MPU
64126 update_protections(current->mm);
64127 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64128 unsigned int i;
64129 int ret = 0;
64130 const struct kernel_symbol *ksym;
64131 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64132 + int is_fs_load = 0;
64133 + int register_filesystem_found = 0;
64134 + char *p;
64135 +
64136 + p = strstr(mod->args, "grsec_modharden_fs");
64137 + if (p) {
64138 + char *endptr = p + strlen("grsec_modharden_fs");
64139 + /* copy \0 as well */
64140 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64141 + is_fs_load = 1;
64142 + }
64143 +#endif
64144
64145 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64146 const char *name = info->strtab + sym[i].st_name;
64147
64148 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64149 + /* it's a real shame this will never get ripped and copied
64150 + upstream! ;(
64151 + */
64152 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64153 + register_filesystem_found = 1;
64154 +#endif
64155 +
64156 switch (sym[i].st_shndx) {
64157 case SHN_COMMON:
64158 /* We compiled with -fno-common. These are not
64159 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64160 ksym = resolve_symbol_wait(mod, info, name);
64161 /* Ok if resolved. */
64162 if (ksym && !IS_ERR(ksym)) {
64163 + pax_open_kernel();
64164 sym[i].st_value = ksym->value;
64165 + pax_close_kernel();
64166 break;
64167 }
64168
64169 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64170 secbase = (unsigned long)mod_percpu(mod);
64171 else
64172 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64173 + pax_open_kernel();
64174 sym[i].st_value += secbase;
64175 + pax_close_kernel();
64176 break;
64177 }
64178 }
64179
64180 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64181 + if (is_fs_load && !register_filesystem_found) {
64182 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64183 + ret = -EPERM;
64184 + }
64185 +#endif
64186 +
64187 return ret;
64188 }
64189
64190 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64191 || s->sh_entsize != ~0UL
64192 || strstarts(sname, ".init"))
64193 continue;
64194 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64195 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64196 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64197 + else
64198 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64199 DEBUGP("\t%s\n", name);
64200 }
64201 - switch (m) {
64202 - case 0: /* executable */
64203 - mod->core_size = debug_align(mod->core_size);
64204 - mod->core_text_size = mod->core_size;
64205 - break;
64206 - case 1: /* RO: text and ro-data */
64207 - mod->core_size = debug_align(mod->core_size);
64208 - mod->core_ro_size = mod->core_size;
64209 - break;
64210 - case 3: /* whole core */
64211 - mod->core_size = debug_align(mod->core_size);
64212 - break;
64213 - }
64214 }
64215
64216 DEBUGP("Init section allocation order:\n");
64217 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64218 || s->sh_entsize != ~0UL
64219 || !strstarts(sname, ".init"))
64220 continue;
64221 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64222 - | INIT_OFFSET_MASK);
64223 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64224 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64225 + else
64226 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64227 + s->sh_entsize |= INIT_OFFSET_MASK;
64228 DEBUGP("\t%s\n", sname);
64229 }
64230 - switch (m) {
64231 - case 0: /* executable */
64232 - mod->init_size = debug_align(mod->init_size);
64233 - mod->init_text_size = mod->init_size;
64234 - break;
64235 - case 1: /* RO: text and ro-data */
64236 - mod->init_size = debug_align(mod->init_size);
64237 - mod->init_ro_size = mod->init_size;
64238 - break;
64239 - case 3: /* whole init */
64240 - mod->init_size = debug_align(mod->init_size);
64241 - break;
64242 - }
64243 }
64244 }
64245
64246 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64247
64248 /* Put symbol section at end of init part of module. */
64249 symsect->sh_flags |= SHF_ALLOC;
64250 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64251 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64252 info->index.sym) | INIT_OFFSET_MASK;
64253 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64254
64255 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64256 }
64257
64258 /* Append room for core symbols at end of core part. */
64259 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64260 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64261 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64262 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64263
64264 /* Put string table section at end of init part of module. */
64265 strsect->sh_flags |= SHF_ALLOC;
64266 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64267 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64268 info->index.str) | INIT_OFFSET_MASK;
64269 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64270
64271 /* Append room for core symbols' strings at end of core part. */
64272 - info->stroffs = mod->core_size;
64273 + info->stroffs = mod->core_size_rx;
64274 __set_bit(0, info->strmap);
64275 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64276 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64277 }
64278
64279 static void add_kallsyms(struct module *mod, const struct load_info *info)
64280 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64281 /* Make sure we get permanent strtab: don't use info->strtab. */
64282 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64283
64284 + pax_open_kernel();
64285 +
64286 /* Set types up while we still have access to sections. */
64287 for (i = 0; i < mod->num_symtab; i++)
64288 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64289
64290 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64291 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64292 src = mod->symtab;
64293 *dst = *src;
64294 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64295 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64296 }
64297 mod->core_num_syms = ndst;
64298
64299 - mod->core_strtab = s = mod->module_core + info->stroffs;
64300 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64301 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64302 if (test_bit(i, info->strmap))
64303 *++s = mod->strtab[i];
64304 +
64305 + pax_close_kernel();
64306 }
64307 #else
64308 static inline void layout_symtab(struct module *mod, struct load_info *info)
64309 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64310 return size == 0 ? NULL : vmalloc_exec(size);
64311 }
64312
64313 -static void *module_alloc_update_bounds(unsigned long size)
64314 +static void *module_alloc_update_bounds_rw(unsigned long size)
64315 {
64316 void *ret = module_alloc(size);
64317
64318 if (ret) {
64319 mutex_lock(&module_mutex);
64320 /* Update module bounds. */
64321 - if ((unsigned long)ret < module_addr_min)
64322 - module_addr_min = (unsigned long)ret;
64323 - if ((unsigned long)ret + size > module_addr_max)
64324 - module_addr_max = (unsigned long)ret + size;
64325 + if ((unsigned long)ret < module_addr_min_rw)
64326 + module_addr_min_rw = (unsigned long)ret;
64327 + if ((unsigned long)ret + size > module_addr_max_rw)
64328 + module_addr_max_rw = (unsigned long)ret + size;
64329 + mutex_unlock(&module_mutex);
64330 + }
64331 + return ret;
64332 +}
64333 +
64334 +static void *module_alloc_update_bounds_rx(unsigned long size)
64335 +{
64336 + void *ret = module_alloc_exec(size);
64337 +
64338 + if (ret) {
64339 + mutex_lock(&module_mutex);
64340 + /* Update module bounds. */
64341 + if ((unsigned long)ret < module_addr_min_rx)
64342 + module_addr_min_rx = (unsigned long)ret;
64343 + if ((unsigned long)ret + size > module_addr_max_rx)
64344 + module_addr_max_rx = (unsigned long)ret + size;
64345 mutex_unlock(&module_mutex);
64346 }
64347 return ret;
64348 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64349 static int check_modinfo(struct module *mod, struct load_info *info)
64350 {
64351 const char *modmagic = get_modinfo(info, "vermagic");
64352 + const char *license = get_modinfo(info, "license");
64353 int err;
64354
64355 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64356 + if (!license || !license_is_gpl_compatible(license))
64357 + return -ENOEXEC;
64358 +#endif
64359 +
64360 /* This is allowed: modprobe --force will invalidate it. */
64361 if (!modmagic) {
64362 err = try_to_force_load(mod, "bad vermagic");
64363 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64364 }
64365
64366 /* Set up license info based on the info section */
64367 - set_license(mod, get_modinfo(info, "license"));
64368 + set_license(mod, license);
64369
64370 return 0;
64371 }
64372 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64373 void *ptr;
64374
64375 /* Do the allocs. */
64376 - ptr = module_alloc_update_bounds(mod->core_size);
64377 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64378 /*
64379 * The pointer to this block is stored in the module structure
64380 * which is inside the block. Just mark it as not being a
64381 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64382 if (!ptr)
64383 return -ENOMEM;
64384
64385 - memset(ptr, 0, mod->core_size);
64386 - mod->module_core = ptr;
64387 + memset(ptr, 0, mod->core_size_rw);
64388 + mod->module_core_rw = ptr;
64389
64390 - ptr = module_alloc_update_bounds(mod->init_size);
64391 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64392 /*
64393 * The pointer to this block is stored in the module structure
64394 * which is inside the block. This block doesn't need to be
64395 * scanned as it contains data and code that will be freed
64396 * after the module is initialized.
64397 */
64398 - kmemleak_ignore(ptr);
64399 - if (!ptr && mod->init_size) {
64400 - module_free(mod, mod->module_core);
64401 + kmemleak_not_leak(ptr);
64402 + if (!ptr && mod->init_size_rw) {
64403 + module_free(mod, mod->module_core_rw);
64404 return -ENOMEM;
64405 }
64406 - memset(ptr, 0, mod->init_size);
64407 - mod->module_init = ptr;
64408 + memset(ptr, 0, mod->init_size_rw);
64409 + mod->module_init_rw = ptr;
64410 +
64411 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64412 + kmemleak_not_leak(ptr);
64413 + if (!ptr) {
64414 + module_free(mod, mod->module_init_rw);
64415 + module_free(mod, mod->module_core_rw);
64416 + return -ENOMEM;
64417 + }
64418 +
64419 + pax_open_kernel();
64420 + memset(ptr, 0, mod->core_size_rx);
64421 + pax_close_kernel();
64422 + mod->module_core_rx = ptr;
64423 +
64424 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64425 + kmemleak_not_leak(ptr);
64426 + if (!ptr && mod->init_size_rx) {
64427 + module_free_exec(mod, mod->module_core_rx);
64428 + module_free(mod, mod->module_init_rw);
64429 + module_free(mod, mod->module_core_rw);
64430 + return -ENOMEM;
64431 + }
64432 +
64433 + pax_open_kernel();
64434 + memset(ptr, 0, mod->init_size_rx);
64435 + pax_close_kernel();
64436 + mod->module_init_rx = ptr;
64437
64438 /* Transfer each section which specifies SHF_ALLOC */
64439 DEBUGP("final section addresses:\n");
64440 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64441 if (!(shdr->sh_flags & SHF_ALLOC))
64442 continue;
64443
64444 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64445 - dest = mod->module_init
64446 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64447 - else
64448 - dest = mod->module_core + shdr->sh_entsize;
64449 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64450 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64451 + dest = mod->module_init_rw
64452 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64453 + else
64454 + dest = mod->module_init_rx
64455 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64456 + } else {
64457 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64458 + dest = mod->module_core_rw + shdr->sh_entsize;
64459 + else
64460 + dest = mod->module_core_rx + shdr->sh_entsize;
64461 + }
64462 +
64463 + if (shdr->sh_type != SHT_NOBITS) {
64464 +
64465 +#ifdef CONFIG_PAX_KERNEXEC
64466 +#ifdef CONFIG_X86_64
64467 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64468 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64469 +#endif
64470 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64471 + pax_open_kernel();
64472 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64473 + pax_close_kernel();
64474 + } else
64475 +#endif
64476
64477 - if (shdr->sh_type != SHT_NOBITS)
64478 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64479 + }
64480 /* Update sh_addr to point to copy in image. */
64481 - shdr->sh_addr = (unsigned long)dest;
64482 +
64483 +#ifdef CONFIG_PAX_KERNEXEC
64484 + if (shdr->sh_flags & SHF_EXECINSTR)
64485 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64486 + else
64487 +#endif
64488 +
64489 + shdr->sh_addr = (unsigned long)dest;
64490 DEBUGP("\t0x%lx %s\n",
64491 shdr->sh_addr, info->secstrings + shdr->sh_name);
64492 }
64493 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64494 * Do it before processing of module parameters, so the module
64495 * can provide parameter accessor functions of its own.
64496 */
64497 - if (mod->module_init)
64498 - flush_icache_range((unsigned long)mod->module_init,
64499 - (unsigned long)mod->module_init
64500 - + mod->init_size);
64501 - flush_icache_range((unsigned long)mod->module_core,
64502 - (unsigned long)mod->module_core + mod->core_size);
64503 + if (mod->module_init_rx)
64504 + flush_icache_range((unsigned long)mod->module_init_rx,
64505 + (unsigned long)mod->module_init_rx
64506 + + mod->init_size_rx);
64507 + flush_icache_range((unsigned long)mod->module_core_rx,
64508 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64509
64510 set_fs(old_fs);
64511 }
64512 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64513 {
64514 kfree(info->strmap);
64515 percpu_modfree(mod);
64516 - module_free(mod, mod->module_init);
64517 - module_free(mod, mod->module_core);
64518 + module_free_exec(mod, mod->module_init_rx);
64519 + module_free_exec(mod, mod->module_core_rx);
64520 + module_free(mod, mod->module_init_rw);
64521 + module_free(mod, mod->module_core_rw);
64522 }
64523
64524 int __weak module_finalize(const Elf_Ehdr *hdr,
64525 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64526 if (err)
64527 goto free_unload;
64528
64529 + /* Now copy in args */
64530 + mod->args = strndup_user(uargs, ~0UL >> 1);
64531 + if (IS_ERR(mod->args)) {
64532 + err = PTR_ERR(mod->args);
64533 + goto free_unload;
64534 + }
64535 +
64536 /* Set up MODINFO_ATTR fields */
64537 setup_modinfo(mod, &info);
64538
64539 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64540 + {
64541 + char *p, *p2;
64542 +
64543 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64544 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64545 + err = -EPERM;
64546 + goto free_modinfo;
64547 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64548 + p += strlen("grsec_modharden_normal");
64549 + p2 = strstr(p, "_");
64550 + if (p2) {
64551 + *p2 = '\0';
64552 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64553 + *p2 = '_';
64554 + }
64555 + err = -EPERM;
64556 + goto free_modinfo;
64557 + }
64558 + }
64559 +#endif
64560 +
64561 /* Fix up syms, so that st_value is a pointer to location. */
64562 err = simplify_symbols(mod, &info);
64563 if (err < 0)
64564 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64565
64566 flush_module_icache(mod);
64567
64568 - /* Now copy in args */
64569 - mod->args = strndup_user(uargs, ~0UL >> 1);
64570 - if (IS_ERR(mod->args)) {
64571 - err = PTR_ERR(mod->args);
64572 - goto free_arch_cleanup;
64573 - }
64574 -
64575 /* Mark state as coming so strong_try_module_get() ignores us. */
64576 mod->state = MODULE_STATE_COMING;
64577
64578 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64579 unlock:
64580 mutex_unlock(&module_mutex);
64581 synchronize_sched();
64582 - kfree(mod->args);
64583 - free_arch_cleanup:
64584 module_arch_cleanup(mod);
64585 free_modinfo:
64586 free_modinfo(mod);
64587 + kfree(mod->args);
64588 free_unload:
64589 module_unload_free(mod);
64590 free_module:
64591 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64592 MODULE_STATE_COMING, mod);
64593
64594 /* Set RO and NX regions for core */
64595 - set_section_ro_nx(mod->module_core,
64596 - mod->core_text_size,
64597 - mod->core_ro_size,
64598 - mod->core_size);
64599 + set_section_ro_nx(mod->module_core_rx,
64600 + mod->core_size_rx,
64601 + mod->core_size_rx,
64602 + mod->core_size_rx);
64603
64604 /* Set RO and NX regions for init */
64605 - set_section_ro_nx(mod->module_init,
64606 - mod->init_text_size,
64607 - mod->init_ro_size,
64608 - mod->init_size);
64609 + set_section_ro_nx(mod->module_init_rx,
64610 + mod->init_size_rx,
64611 + mod->init_size_rx,
64612 + mod->init_size_rx);
64613
64614 do_mod_ctors(mod);
64615 /* Start the module */
64616 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64617 mod->strtab = mod->core_strtab;
64618 #endif
64619 unset_module_init_ro_nx(mod);
64620 - module_free(mod, mod->module_init);
64621 - mod->module_init = NULL;
64622 - mod->init_size = 0;
64623 - mod->init_ro_size = 0;
64624 - mod->init_text_size = 0;
64625 + module_free(mod, mod->module_init_rw);
64626 + module_free_exec(mod, mod->module_init_rx);
64627 + mod->module_init_rw = NULL;
64628 + mod->module_init_rx = NULL;
64629 + mod->init_size_rw = 0;
64630 + mod->init_size_rx = 0;
64631 mutex_unlock(&module_mutex);
64632
64633 return 0;
64634 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64635 unsigned long nextval;
64636
64637 /* At worse, next value is at end of module */
64638 - if (within_module_init(addr, mod))
64639 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64640 + if (within_module_init_rx(addr, mod))
64641 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64642 + else if (within_module_init_rw(addr, mod))
64643 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64644 + else if (within_module_core_rx(addr, mod))
64645 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64646 + else if (within_module_core_rw(addr, mod))
64647 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64648 else
64649 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64650 + return NULL;
64651
64652 /* Scan for closest preceding symbol, and next symbol. (ELF
64653 starts real symbols at 1). */
64654 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64655 char buf[8];
64656
64657 seq_printf(m, "%s %u",
64658 - mod->name, mod->init_size + mod->core_size);
64659 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64660 print_unload_info(m, mod);
64661
64662 /* Informative for users. */
64663 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64664 mod->state == MODULE_STATE_COMING ? "Loading":
64665 "Live");
64666 /* Used by oprofile and other similar tools. */
64667 - seq_printf(m, " 0x%pK", mod->module_core);
64668 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64669
64670 /* Taints info */
64671 if (mod->taints)
64672 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64673
64674 static int __init proc_modules_init(void)
64675 {
64676 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64677 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64678 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64679 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64680 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64681 +#else
64682 proc_create("modules", 0, NULL, &proc_modules_operations);
64683 +#endif
64684 +#else
64685 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64686 +#endif
64687 return 0;
64688 }
64689 module_init(proc_modules_init);
64690 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64691 {
64692 struct module *mod;
64693
64694 - if (addr < module_addr_min || addr > module_addr_max)
64695 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64696 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64697 return NULL;
64698
64699 list_for_each_entry_rcu(mod, &modules, list)
64700 - if (within_module_core(addr, mod)
64701 - || within_module_init(addr, mod))
64702 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64703 return mod;
64704 return NULL;
64705 }
64706 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64707 */
64708 struct module *__module_text_address(unsigned long addr)
64709 {
64710 - struct module *mod = __module_address(addr);
64711 + struct module *mod;
64712 +
64713 +#ifdef CONFIG_X86_32
64714 + addr = ktla_ktva(addr);
64715 +#endif
64716 +
64717 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64718 + return NULL;
64719 +
64720 + mod = __module_address(addr);
64721 +
64722 if (mod) {
64723 /* Make sure it's within the text section. */
64724 - if (!within(addr, mod->module_init, mod->init_text_size)
64725 - && !within(addr, mod->module_core, mod->core_text_size))
64726 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64727 mod = NULL;
64728 }
64729 return mod;
64730 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64731 index 7e3443f..b2a1e6b 100644
64732 --- a/kernel/mutex-debug.c
64733 +++ b/kernel/mutex-debug.c
64734 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64735 }
64736
64737 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64738 - struct thread_info *ti)
64739 + struct task_struct *task)
64740 {
64741 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64742
64743 /* Mark the current thread as blocked on the lock: */
64744 - ti->task->blocked_on = waiter;
64745 + task->blocked_on = waiter;
64746 }
64747
64748 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64749 - struct thread_info *ti)
64750 + struct task_struct *task)
64751 {
64752 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64753 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64754 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64755 - ti->task->blocked_on = NULL;
64756 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64757 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64758 + task->blocked_on = NULL;
64759
64760 list_del_init(&waiter->list);
64761 waiter->task = NULL;
64762 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64763 index 0799fd3..d06ae3b 100644
64764 --- a/kernel/mutex-debug.h
64765 +++ b/kernel/mutex-debug.h
64766 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64767 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64768 extern void debug_mutex_add_waiter(struct mutex *lock,
64769 struct mutex_waiter *waiter,
64770 - struct thread_info *ti);
64771 + struct task_struct *task);
64772 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64773 - struct thread_info *ti);
64774 + struct task_struct *task);
64775 extern void debug_mutex_unlock(struct mutex *lock);
64776 extern void debug_mutex_init(struct mutex *lock, const char *name,
64777 struct lock_class_key *key);
64778 diff --git a/kernel/mutex.c b/kernel/mutex.c
64779 index 89096dd..f91ebc5 100644
64780 --- a/kernel/mutex.c
64781 +++ b/kernel/mutex.c
64782 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64783 spin_lock_mutex(&lock->wait_lock, flags);
64784
64785 debug_mutex_lock_common(lock, &waiter);
64786 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64787 + debug_mutex_add_waiter(lock, &waiter, task);
64788
64789 /* add waiting tasks to the end of the waitqueue (FIFO): */
64790 list_add_tail(&waiter.list, &lock->wait_list);
64791 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64792 * TASK_UNINTERRUPTIBLE case.)
64793 */
64794 if (unlikely(signal_pending_state(state, task))) {
64795 - mutex_remove_waiter(lock, &waiter,
64796 - task_thread_info(task));
64797 + mutex_remove_waiter(lock, &waiter, task);
64798 mutex_release(&lock->dep_map, 1, ip);
64799 spin_unlock_mutex(&lock->wait_lock, flags);
64800
64801 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64802 done:
64803 lock_acquired(&lock->dep_map, ip);
64804 /* got the lock - rejoice! */
64805 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64806 + mutex_remove_waiter(lock, &waiter, task);
64807 mutex_set_owner(lock);
64808
64809 /* set it to 0 if there are no waiters left: */
64810 diff --git a/kernel/padata.c b/kernel/padata.c
64811 index b452599..5d68f4e 100644
64812 --- a/kernel/padata.c
64813 +++ b/kernel/padata.c
64814 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64815 padata->pd = pd;
64816 padata->cb_cpu = cb_cpu;
64817
64818 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64819 - atomic_set(&pd->seq_nr, -1);
64820 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64821 + atomic_set_unchecked(&pd->seq_nr, -1);
64822
64823 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64824 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64825
64826 target_cpu = padata_cpu_hash(padata);
64827 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64828 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
64829 padata_init_pqueues(pd);
64830 padata_init_squeues(pd);
64831 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64832 - atomic_set(&pd->seq_nr, -1);
64833 + atomic_set_unchecked(&pd->seq_nr, -1);
64834 atomic_set(&pd->reorder_objects, 0);
64835 atomic_set(&pd->refcnt, 0);
64836 pd->pinst = pinst;
64837 diff --git a/kernel/panic.c b/kernel/panic.c
64838 index b2659360..5972a0f 100644
64839 --- a/kernel/panic.c
64840 +++ b/kernel/panic.c
64841 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
64842 va_end(args);
64843 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
64844 #ifdef CONFIG_DEBUG_BUGVERBOSE
64845 - dump_stack();
64846 + /*
64847 + * Avoid nested stack-dumping if a panic occurs during oops processing
64848 + */
64849 + if (!oops_in_progress)
64850 + dump_stack();
64851 #endif
64852
64853 /*
64854 @@ -373,7 +377,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
64855 const char *board;
64856
64857 printk(KERN_WARNING "------------[ cut here ]------------\n");
64858 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64859 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64860 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64861 if (board)
64862 printk(KERN_WARNING "Hardware name: %s\n", board);
64863 @@ -428,7 +432,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64864 */
64865 void __stack_chk_fail(void)
64866 {
64867 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64868 + dump_stack();
64869 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64870 __builtin_return_address(0));
64871 }
64872 EXPORT_SYMBOL(__stack_chk_fail);
64873 diff --git a/kernel/pid.c b/kernel/pid.c
64874 index fa5f722..0c93e57 100644
64875 --- a/kernel/pid.c
64876 +++ b/kernel/pid.c
64877 @@ -33,6 +33,7 @@
64878 #include <linux/rculist.h>
64879 #include <linux/bootmem.h>
64880 #include <linux/hash.h>
64881 +#include <linux/security.h>
64882 #include <linux/pid_namespace.h>
64883 #include <linux/init_task.h>
64884 #include <linux/syscalls.h>
64885 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
64886
64887 int pid_max = PID_MAX_DEFAULT;
64888
64889 -#define RESERVED_PIDS 300
64890 +#define RESERVED_PIDS 500
64891
64892 int pid_max_min = RESERVED_PIDS + 1;
64893 int pid_max_max = PID_MAX_LIMIT;
64894 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
64895 */
64896 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64897 {
64898 + struct task_struct *task;
64899 +
64900 rcu_lockdep_assert(rcu_read_lock_held(),
64901 "find_task_by_pid_ns() needs rcu_read_lock()"
64902 " protection");
64903 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64904 +
64905 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64906 +
64907 + if (gr_pid_is_chrooted(task))
64908 + return NULL;
64909 +
64910 + return task;
64911 }
64912
64913 struct task_struct *find_task_by_vpid(pid_t vnr)
64914 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
64915 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64916 }
64917
64918 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64919 +{
64920 + rcu_lockdep_assert(rcu_read_lock_held(),
64921 + "find_task_by_pid_ns() needs rcu_read_lock()"
64922 + " protection");
64923 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64924 +}
64925 +
64926 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64927 {
64928 struct pid *pid;
64929 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
64930 index e7cb76d..75eceb3 100644
64931 --- a/kernel/posix-cpu-timers.c
64932 +++ b/kernel/posix-cpu-timers.c
64933 @@ -6,6 +6,7 @@
64934 #include <linux/posix-timers.h>
64935 #include <linux/errno.h>
64936 #include <linux/math64.h>
64937 +#include <linux/security.h>
64938 #include <asm/uaccess.h>
64939 #include <linux/kernel_stat.h>
64940 #include <trace/events/timer.h>
64941 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64942
64943 static __init int init_posix_cpu_timers(void)
64944 {
64945 - struct k_clock process = {
64946 + static struct k_clock process = {
64947 .clock_getres = process_cpu_clock_getres,
64948 .clock_get = process_cpu_clock_get,
64949 .timer_create = process_cpu_timer_create,
64950 .nsleep = process_cpu_nsleep,
64951 .nsleep_restart = process_cpu_nsleep_restart,
64952 };
64953 - struct k_clock thread = {
64954 + static struct k_clock thread = {
64955 .clock_getres = thread_cpu_clock_getres,
64956 .clock_get = thread_cpu_clock_get,
64957 .timer_create = thread_cpu_timer_create,
64958 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
64959 index 69185ae..cc2847a 100644
64960 --- a/kernel/posix-timers.c
64961 +++ b/kernel/posix-timers.c
64962 @@ -43,6 +43,7 @@
64963 #include <linux/idr.h>
64964 #include <linux/posix-clock.h>
64965 #include <linux/posix-timers.h>
64966 +#include <linux/grsecurity.h>
64967 #include <linux/syscalls.h>
64968 #include <linux/wait.h>
64969 #include <linux/workqueue.h>
64970 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64971 * which we beg off on and pass to do_sys_settimeofday().
64972 */
64973
64974 -static struct k_clock posix_clocks[MAX_CLOCKS];
64975 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64976
64977 /*
64978 * These ones are defined below.
64979 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
64980 */
64981 static __init int init_posix_timers(void)
64982 {
64983 - struct k_clock clock_realtime = {
64984 + static struct k_clock clock_realtime = {
64985 .clock_getres = hrtimer_get_res,
64986 .clock_get = posix_clock_realtime_get,
64987 .clock_set = posix_clock_realtime_set,
64988 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
64989 .timer_get = common_timer_get,
64990 .timer_del = common_timer_del,
64991 };
64992 - struct k_clock clock_monotonic = {
64993 + static struct k_clock clock_monotonic = {
64994 .clock_getres = hrtimer_get_res,
64995 .clock_get = posix_ktime_get_ts,
64996 .nsleep = common_nsleep,
64997 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
64998 .timer_get = common_timer_get,
64999 .timer_del = common_timer_del,
65000 };
65001 - struct k_clock clock_monotonic_raw = {
65002 + static struct k_clock clock_monotonic_raw = {
65003 .clock_getres = hrtimer_get_res,
65004 .clock_get = posix_get_monotonic_raw,
65005 };
65006 - struct k_clock clock_realtime_coarse = {
65007 + static struct k_clock clock_realtime_coarse = {
65008 .clock_getres = posix_get_coarse_res,
65009 .clock_get = posix_get_realtime_coarse,
65010 };
65011 - struct k_clock clock_monotonic_coarse = {
65012 + static struct k_clock clock_monotonic_coarse = {
65013 .clock_getres = posix_get_coarse_res,
65014 .clock_get = posix_get_monotonic_coarse,
65015 };
65016 - struct k_clock clock_boottime = {
65017 + static struct k_clock clock_boottime = {
65018 .clock_getres = hrtimer_get_res,
65019 .clock_get = posix_get_boottime,
65020 .nsleep = common_nsleep,
65021 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65022 return;
65023 }
65024
65025 - posix_clocks[clock_id] = *new_clock;
65026 + posix_clocks[clock_id] = new_clock;
65027 }
65028 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65029
65030 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65031 return (id & CLOCKFD_MASK) == CLOCKFD ?
65032 &clock_posix_dynamic : &clock_posix_cpu;
65033
65034 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65035 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65036 return NULL;
65037 - return &posix_clocks[id];
65038 + return posix_clocks[id];
65039 }
65040
65041 static int common_timer_create(struct k_itimer *new_timer)
65042 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65043 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65044 return -EFAULT;
65045
65046 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65047 + have their clock_set fptr set to a nosettime dummy function
65048 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65049 + call common_clock_set, which calls do_sys_settimeofday, which
65050 + we hook
65051 + */
65052 +
65053 return kc->clock_set(which_clock, &new_tp);
65054 }
65055
65056 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65057 index d523593..68197a4 100644
65058 --- a/kernel/power/poweroff.c
65059 +++ b/kernel/power/poweroff.c
65060 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65061 .enable_mask = SYSRQ_ENABLE_BOOT,
65062 };
65063
65064 -static int pm_sysrq_init(void)
65065 +static int __init pm_sysrq_init(void)
65066 {
65067 register_sysrq_key('o', &sysrq_poweroff_op);
65068 return 0;
65069 diff --git a/kernel/power/process.c b/kernel/power/process.c
65070 index addbbe5..f9e32e0 100644
65071 --- a/kernel/power/process.c
65072 +++ b/kernel/power/process.c
65073 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65074 u64 elapsed_csecs64;
65075 unsigned int elapsed_csecs;
65076 bool wakeup = false;
65077 + bool timedout = false;
65078
65079 do_gettimeofday(&start);
65080
65081 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65082
65083 while (true) {
65084 todo = 0;
65085 + if (time_after(jiffies, end_time))
65086 + timedout = true;
65087 read_lock(&tasklist_lock);
65088 do_each_thread(g, p) {
65089 if (frozen(p) || !freezable(p))
65090 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65091 * try_to_stop() after schedule() in ptrace/signal
65092 * stop sees TIF_FREEZE.
65093 */
65094 - if (!task_is_stopped_or_traced(p) &&
65095 - !freezer_should_skip(p))
65096 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65097 todo++;
65098 + if (timedout) {
65099 + printk(KERN_ERR "Task refusing to freeze:\n");
65100 + sched_show_task(p);
65101 + }
65102 + }
65103 } while_each_thread(g, p);
65104 read_unlock(&tasklist_lock);
65105
65106 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65107 todo += wq_busy;
65108 }
65109
65110 - if (!todo || time_after(jiffies, end_time))
65111 + if (!todo || timedout)
65112 break;
65113
65114 if (pm_wakeup_pending()) {
65115 diff --git a/kernel/printk.c b/kernel/printk.c
65116 index 7982a0a..2095fdc 100644
65117 --- a/kernel/printk.c
65118 +++ b/kernel/printk.c
65119 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65120 if (from_file && type != SYSLOG_ACTION_OPEN)
65121 return 0;
65122
65123 +#ifdef CONFIG_GRKERNSEC_DMESG
65124 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65125 + return -EPERM;
65126 +#endif
65127 +
65128 if (syslog_action_restricted(type)) {
65129 if (capable(CAP_SYSLOG))
65130 return 0;
65131 diff --git a/kernel/profile.c b/kernel/profile.c
65132 index 76b8e77..a2930e8 100644
65133 --- a/kernel/profile.c
65134 +++ b/kernel/profile.c
65135 @@ -39,7 +39,7 @@ struct profile_hit {
65136 /* Oprofile timer tick hook */
65137 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65138
65139 -static atomic_t *prof_buffer;
65140 +static atomic_unchecked_t *prof_buffer;
65141 static unsigned long prof_len, prof_shift;
65142
65143 int prof_on __read_mostly;
65144 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65145 hits[i].pc = 0;
65146 continue;
65147 }
65148 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65149 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65150 hits[i].hits = hits[i].pc = 0;
65151 }
65152 }
65153 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65154 * Add the current hit(s) and flush the write-queue out
65155 * to the global buffer:
65156 */
65157 - atomic_add(nr_hits, &prof_buffer[pc]);
65158 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65159 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65160 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65161 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65162 hits[i].pc = hits[i].hits = 0;
65163 }
65164 out:
65165 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65166 {
65167 unsigned long pc;
65168 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65169 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65170 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65171 }
65172 #endif /* !CONFIG_SMP */
65173
65174 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65175 return -EFAULT;
65176 buf++; p++; count--; read++;
65177 }
65178 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65179 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65180 if (copy_to_user(buf, (void *)pnt, count))
65181 return -EFAULT;
65182 read += count;
65183 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65184 }
65185 #endif
65186 profile_discard_flip_buffers();
65187 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65188 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65189 return count;
65190 }
65191
65192 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65193 index 78ab24a..332c915 100644
65194 --- a/kernel/ptrace.c
65195 +++ b/kernel/ptrace.c
65196 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65197 return ret;
65198 }
65199
65200 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65201 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65202 + unsigned int log)
65203 {
65204 const struct cred *cred = current_cred(), *tcred;
65205
65206 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65207 cred->gid == tcred->sgid &&
65208 cred->gid == tcred->gid))
65209 goto ok;
65210 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65211 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65212 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65213 goto ok;
65214 rcu_read_unlock();
65215 return -EPERM;
65216 @@ -207,7 +209,9 @@ ok:
65217 smp_rmb();
65218 if (task->mm)
65219 dumpable = get_dumpable(task->mm);
65220 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65221 + if (!dumpable &&
65222 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65223 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65224 return -EPERM;
65225
65226 return security_ptrace_access_check(task, mode);
65227 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65228 {
65229 int err;
65230 task_lock(task);
65231 - err = __ptrace_may_access(task, mode);
65232 + err = __ptrace_may_access(task, mode, 0);
65233 + task_unlock(task);
65234 + return !err;
65235 +}
65236 +
65237 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65238 +{
65239 + return __ptrace_may_access(task, mode, 0);
65240 +}
65241 +
65242 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65243 +{
65244 + int err;
65245 + task_lock(task);
65246 + err = __ptrace_may_access(task, mode, 1);
65247 task_unlock(task);
65248 return !err;
65249 }
65250 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65251 goto out;
65252
65253 task_lock(task);
65254 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65255 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65256 task_unlock(task);
65257 if (retval)
65258 goto unlock_creds;
65259 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65260 task->ptrace = PT_PTRACED;
65261 if (seize)
65262 task->ptrace |= PT_SEIZED;
65263 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65264 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65265 task->ptrace |= PT_PTRACE_CAP;
65266
65267 __ptrace_link(task, current);
65268 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65269 break;
65270 return -EIO;
65271 }
65272 - if (copy_to_user(dst, buf, retval))
65273 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65274 return -EFAULT;
65275 copied += retval;
65276 src += retval;
65277 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65278 bool seized = child->ptrace & PT_SEIZED;
65279 int ret = -EIO;
65280 siginfo_t siginfo, *si;
65281 - void __user *datavp = (void __user *) data;
65282 + void __user *datavp = (__force void __user *) data;
65283 unsigned long __user *datalp = datavp;
65284 unsigned long flags;
65285
65286 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65287 goto out;
65288 }
65289
65290 + if (gr_handle_ptrace(child, request)) {
65291 + ret = -EPERM;
65292 + goto out_put_task_struct;
65293 + }
65294 +
65295 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65296 ret = ptrace_attach(child, request, data);
65297 /*
65298 * Some architectures need to do book-keeping after
65299 * a ptrace attach.
65300 */
65301 - if (!ret)
65302 + if (!ret) {
65303 arch_ptrace_attach(child);
65304 + gr_audit_ptrace(child);
65305 + }
65306 goto out_put_task_struct;
65307 }
65308
65309 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65310 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65311 if (copied != sizeof(tmp))
65312 return -EIO;
65313 - return put_user(tmp, (unsigned long __user *)data);
65314 + return put_user(tmp, (__force unsigned long __user *)data);
65315 }
65316
65317 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65318 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65319 goto out;
65320 }
65321
65322 + if (gr_handle_ptrace(child, request)) {
65323 + ret = -EPERM;
65324 + goto out_put_task_struct;
65325 + }
65326 +
65327 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65328 ret = ptrace_attach(child, request, data);
65329 /*
65330 * Some architectures need to do book-keeping after
65331 * a ptrace attach.
65332 */
65333 - if (!ret)
65334 + if (!ret) {
65335 arch_ptrace_attach(child);
65336 + gr_audit_ptrace(child);
65337 + }
65338 goto out_put_task_struct;
65339 }
65340
65341 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65342 index 764825c..3aa6ac4 100644
65343 --- a/kernel/rcutorture.c
65344 +++ b/kernel/rcutorture.c
65345 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65346 { 0 };
65347 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65348 { 0 };
65349 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65350 -static atomic_t n_rcu_torture_alloc;
65351 -static atomic_t n_rcu_torture_alloc_fail;
65352 -static atomic_t n_rcu_torture_free;
65353 -static atomic_t n_rcu_torture_mberror;
65354 -static atomic_t n_rcu_torture_error;
65355 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65356 +static atomic_unchecked_t n_rcu_torture_alloc;
65357 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65358 +static atomic_unchecked_t n_rcu_torture_free;
65359 +static atomic_unchecked_t n_rcu_torture_mberror;
65360 +static atomic_unchecked_t n_rcu_torture_error;
65361 static long n_rcu_torture_boost_ktrerror;
65362 static long n_rcu_torture_boost_rterror;
65363 static long n_rcu_torture_boost_failure;
65364 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65365
65366 spin_lock_bh(&rcu_torture_lock);
65367 if (list_empty(&rcu_torture_freelist)) {
65368 - atomic_inc(&n_rcu_torture_alloc_fail);
65369 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65370 spin_unlock_bh(&rcu_torture_lock);
65371 return NULL;
65372 }
65373 - atomic_inc(&n_rcu_torture_alloc);
65374 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65375 p = rcu_torture_freelist.next;
65376 list_del_init(p);
65377 spin_unlock_bh(&rcu_torture_lock);
65378 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65379 static void
65380 rcu_torture_free(struct rcu_torture *p)
65381 {
65382 - atomic_inc(&n_rcu_torture_free);
65383 + atomic_inc_unchecked(&n_rcu_torture_free);
65384 spin_lock_bh(&rcu_torture_lock);
65385 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65386 spin_unlock_bh(&rcu_torture_lock);
65387 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65388 i = rp->rtort_pipe_count;
65389 if (i > RCU_TORTURE_PIPE_LEN)
65390 i = RCU_TORTURE_PIPE_LEN;
65391 - atomic_inc(&rcu_torture_wcount[i]);
65392 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65393 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65394 rp->rtort_mbtest = 0;
65395 rcu_torture_free(rp);
65396 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65397 i = rp->rtort_pipe_count;
65398 if (i > RCU_TORTURE_PIPE_LEN)
65399 i = RCU_TORTURE_PIPE_LEN;
65400 - atomic_inc(&rcu_torture_wcount[i]);
65401 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65402 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65403 rp->rtort_mbtest = 0;
65404 list_del(&rp->rtort_free);
65405 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65406 i = old_rp->rtort_pipe_count;
65407 if (i > RCU_TORTURE_PIPE_LEN)
65408 i = RCU_TORTURE_PIPE_LEN;
65409 - atomic_inc(&rcu_torture_wcount[i]);
65410 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65411 old_rp->rtort_pipe_count++;
65412 cur_ops->deferred_free(old_rp);
65413 }
65414 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65415 return;
65416 }
65417 if (p->rtort_mbtest == 0)
65418 - atomic_inc(&n_rcu_torture_mberror);
65419 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65420 spin_lock(&rand_lock);
65421 cur_ops->read_delay(&rand);
65422 n_rcu_torture_timers++;
65423 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65424 continue;
65425 }
65426 if (p->rtort_mbtest == 0)
65427 - atomic_inc(&n_rcu_torture_mberror);
65428 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65429 cur_ops->read_delay(&rand);
65430 preempt_disable();
65431 pipe_count = p->rtort_pipe_count;
65432 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65433 rcu_torture_current,
65434 rcu_torture_current_version,
65435 list_empty(&rcu_torture_freelist),
65436 - atomic_read(&n_rcu_torture_alloc),
65437 - atomic_read(&n_rcu_torture_alloc_fail),
65438 - atomic_read(&n_rcu_torture_free),
65439 - atomic_read(&n_rcu_torture_mberror),
65440 + atomic_read_unchecked(&n_rcu_torture_alloc),
65441 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65442 + atomic_read_unchecked(&n_rcu_torture_free),
65443 + atomic_read_unchecked(&n_rcu_torture_mberror),
65444 n_rcu_torture_boost_ktrerror,
65445 n_rcu_torture_boost_rterror,
65446 n_rcu_torture_boost_failure,
65447 n_rcu_torture_boosts,
65448 n_rcu_torture_timers);
65449 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65450 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65451 n_rcu_torture_boost_ktrerror != 0 ||
65452 n_rcu_torture_boost_rterror != 0 ||
65453 n_rcu_torture_boost_failure != 0)
65454 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65455 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65456 if (i > 1) {
65457 cnt += sprintf(&page[cnt], "!!! ");
65458 - atomic_inc(&n_rcu_torture_error);
65459 + atomic_inc_unchecked(&n_rcu_torture_error);
65460 WARN_ON_ONCE(1);
65461 }
65462 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65463 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65464 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65465 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65466 cnt += sprintf(&page[cnt], " %d",
65467 - atomic_read(&rcu_torture_wcount[i]));
65468 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65469 }
65470 cnt += sprintf(&page[cnt], "\n");
65471 if (cur_ops->stats)
65472 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65473
65474 if (cur_ops->cleanup)
65475 cur_ops->cleanup();
65476 - if (atomic_read(&n_rcu_torture_error))
65477 + if (atomic_read_unchecked(&n_rcu_torture_error))
65478 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65479 else
65480 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65481 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65482
65483 rcu_torture_current = NULL;
65484 rcu_torture_current_version = 0;
65485 - atomic_set(&n_rcu_torture_alloc, 0);
65486 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65487 - atomic_set(&n_rcu_torture_free, 0);
65488 - atomic_set(&n_rcu_torture_mberror, 0);
65489 - atomic_set(&n_rcu_torture_error, 0);
65490 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65491 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65492 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65493 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65494 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65495 n_rcu_torture_boost_ktrerror = 0;
65496 n_rcu_torture_boost_rterror = 0;
65497 n_rcu_torture_boost_failure = 0;
65498 n_rcu_torture_boosts = 0;
65499 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65500 - atomic_set(&rcu_torture_wcount[i], 0);
65501 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65502 for_each_possible_cpu(cpu) {
65503 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65504 per_cpu(rcu_torture_count, cpu)[i] = 0;
65505 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65506 index 6b76d81..7afc1b3 100644
65507 --- a/kernel/rcutree.c
65508 +++ b/kernel/rcutree.c
65509 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65510 trace_rcu_dyntick("Start");
65511 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65512 smp_mb__before_atomic_inc(); /* See above. */
65513 - atomic_inc(&rdtp->dynticks);
65514 + atomic_inc_unchecked(&rdtp->dynticks);
65515 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65516 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65517 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65518 local_irq_restore(flags);
65519 }
65520
65521 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65522 return;
65523 }
65524 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65525 - atomic_inc(&rdtp->dynticks);
65526 + atomic_inc_unchecked(&rdtp->dynticks);
65527 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65528 smp_mb__after_atomic_inc(); /* See above. */
65529 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65530 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65531 trace_rcu_dyntick("End");
65532 local_irq_restore(flags);
65533 }
65534 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65535 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65536
65537 if (rdtp->dynticks_nmi_nesting == 0 &&
65538 - (atomic_read(&rdtp->dynticks) & 0x1))
65539 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65540 return;
65541 rdtp->dynticks_nmi_nesting++;
65542 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65543 - atomic_inc(&rdtp->dynticks);
65544 + atomic_inc_unchecked(&rdtp->dynticks);
65545 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65546 smp_mb__after_atomic_inc(); /* See above. */
65547 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65548 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65549 }
65550
65551 /**
65552 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65553 return;
65554 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65555 smp_mb__before_atomic_inc(); /* See above. */
65556 - atomic_inc(&rdtp->dynticks);
65557 + atomic_inc_unchecked(&rdtp->dynticks);
65558 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65559 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65560 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65561 }
65562
65563 /**
65564 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65565 */
65566 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65567 {
65568 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65569 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65570 return 0;
65571 }
65572
65573 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65574 unsigned int curr;
65575 unsigned int snap;
65576
65577 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65578 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65579 snap = (unsigned int)rdp->dynticks_snap;
65580
65581 /*
65582 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65583 /*
65584 * Do RCU core processing for the current CPU.
65585 */
65586 -static void rcu_process_callbacks(struct softirq_action *unused)
65587 +static void rcu_process_callbacks(void)
65588 {
65589 trace_rcu_utilization("Start RCU core");
65590 __rcu_process_callbacks(&rcu_sched_state,
65591 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65592 index 849ce9e..74bc9de 100644
65593 --- a/kernel/rcutree.h
65594 +++ b/kernel/rcutree.h
65595 @@ -86,7 +86,7 @@
65596 struct rcu_dynticks {
65597 int dynticks_nesting; /* Track irq/process nesting level. */
65598 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65599 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65600 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65601 };
65602
65603 /* RCU's kthread states for tracing. */
65604 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65605 index 4b9b9f8..2326053 100644
65606 --- a/kernel/rcutree_plugin.h
65607 +++ b/kernel/rcutree_plugin.h
65608 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65609
65610 /* Clean up and exit. */
65611 smp_mb(); /* ensure expedited GP seen before counter increment. */
65612 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65613 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65614 unlock_mb_ret:
65615 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65616 mb_ret:
65617 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65618
65619 #else /* #ifndef CONFIG_SMP */
65620
65621 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65622 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65623 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65624 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65625
65626 static int synchronize_sched_expedited_cpu_stop(void *data)
65627 {
65628 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65629 int firstsnap, s, snap, trycount = 0;
65630
65631 /* Note that atomic_inc_return() implies full memory barrier. */
65632 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65633 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65634 get_online_cpus();
65635
65636 /*
65637 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65638 }
65639
65640 /* Check to see if someone else did our work for us. */
65641 - s = atomic_read(&sync_sched_expedited_done);
65642 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65643 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65644 smp_mb(); /* ensure test happens before caller kfree */
65645 return;
65646 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65647 * grace period works for us.
65648 */
65649 get_online_cpus();
65650 - snap = atomic_read(&sync_sched_expedited_started) - 1;
65651 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65652 smp_mb(); /* ensure read is before try_stop_cpus(). */
65653 }
65654
65655 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65656 * than we did beat us to the punch.
65657 */
65658 do {
65659 - s = atomic_read(&sync_sched_expedited_done);
65660 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65661 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65662 smp_mb(); /* ensure test happens before caller kfree */
65663 break;
65664 }
65665 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65666 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65667
65668 put_online_cpus();
65669 }
65670 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65671 for_each_online_cpu(thatcpu) {
65672 if (thatcpu == cpu)
65673 continue;
65674 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65675 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65676 thatcpu).dynticks);
65677 smp_mb(); /* Order sampling of snap with end of grace period. */
65678 if ((snap & 0x1) != 0) {
65679 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65680 index 9feffa4..54058df 100644
65681 --- a/kernel/rcutree_trace.c
65682 +++ b/kernel/rcutree_trace.c
65683 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65684 rdp->qs_pending);
65685 #ifdef CONFIG_NO_HZ
65686 seq_printf(m, " dt=%d/%d/%d df=%lu",
65687 - atomic_read(&rdp->dynticks->dynticks),
65688 + atomic_read_unchecked(&rdp->dynticks->dynticks),
65689 rdp->dynticks->dynticks_nesting,
65690 rdp->dynticks->dynticks_nmi_nesting,
65691 rdp->dynticks_fqs);
65692 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65693 rdp->qs_pending);
65694 #ifdef CONFIG_NO_HZ
65695 seq_printf(m, ",%d,%d,%d,%lu",
65696 - atomic_read(&rdp->dynticks->dynticks),
65697 + atomic_read_unchecked(&rdp->dynticks->dynticks),
65698 rdp->dynticks->dynticks_nesting,
65699 rdp->dynticks->dynticks_nmi_nesting,
65700 rdp->dynticks_fqs);
65701 diff --git a/kernel/resource.c b/kernel/resource.c
65702 index 7640b3a..5879283 100644
65703 --- a/kernel/resource.c
65704 +++ b/kernel/resource.c
65705 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65706
65707 static int __init ioresources_init(void)
65708 {
65709 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65710 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65711 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65712 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65713 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65714 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65715 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65716 +#endif
65717 +#else
65718 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65719 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65720 +#endif
65721 return 0;
65722 }
65723 __initcall(ioresources_init);
65724 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65725 index 3d9f31c..7fefc9e 100644
65726 --- a/kernel/rtmutex-tester.c
65727 +++ b/kernel/rtmutex-tester.c
65728 @@ -20,7 +20,7 @@
65729 #define MAX_RT_TEST_MUTEXES 8
65730
65731 static spinlock_t rttest_lock;
65732 -static atomic_t rttest_event;
65733 +static atomic_unchecked_t rttest_event;
65734
65735 struct test_thread_data {
65736 int opcode;
65737 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65738
65739 case RTTEST_LOCKCONT:
65740 td->mutexes[td->opdata] = 1;
65741 - td->event = atomic_add_return(1, &rttest_event);
65742 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65743 return 0;
65744
65745 case RTTEST_RESET:
65746 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65747 return 0;
65748
65749 case RTTEST_RESETEVENT:
65750 - atomic_set(&rttest_event, 0);
65751 + atomic_set_unchecked(&rttest_event, 0);
65752 return 0;
65753
65754 default:
65755 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65756 return ret;
65757
65758 td->mutexes[id] = 1;
65759 - td->event = atomic_add_return(1, &rttest_event);
65760 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65761 rt_mutex_lock(&mutexes[id]);
65762 - td->event = atomic_add_return(1, &rttest_event);
65763 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65764 td->mutexes[id] = 4;
65765 return 0;
65766
65767 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65768 return ret;
65769
65770 td->mutexes[id] = 1;
65771 - td->event = atomic_add_return(1, &rttest_event);
65772 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65773 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65774 - td->event = atomic_add_return(1, &rttest_event);
65775 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65776 td->mutexes[id] = ret ? 0 : 4;
65777 return ret ? -EINTR : 0;
65778
65779 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65780 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65781 return ret;
65782
65783 - td->event = atomic_add_return(1, &rttest_event);
65784 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65785 rt_mutex_unlock(&mutexes[id]);
65786 - td->event = atomic_add_return(1, &rttest_event);
65787 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65788 td->mutexes[id] = 0;
65789 return 0;
65790
65791 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65792 break;
65793
65794 td->mutexes[dat] = 2;
65795 - td->event = atomic_add_return(1, &rttest_event);
65796 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65797 break;
65798
65799 default:
65800 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65801 return;
65802
65803 td->mutexes[dat] = 3;
65804 - td->event = atomic_add_return(1, &rttest_event);
65805 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65806 break;
65807
65808 case RTTEST_LOCKNOWAIT:
65809 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65810 return;
65811
65812 td->mutexes[dat] = 1;
65813 - td->event = atomic_add_return(1, &rttest_event);
65814 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65815 return;
65816
65817 default:
65818 diff --git a/kernel/sched.c b/kernel/sched.c
65819 index d6b149c..896cbb8 100644
65820 --- a/kernel/sched.c
65821 +++ b/kernel/sched.c
65822 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65823 BUG(); /* the idle class will always have a runnable task */
65824 }
65825
65826 +#ifdef CONFIG_GRKERNSEC_SETXID
65827 +extern void gr_delayed_cred_worker(void);
65828 +static inline void gr_cred_schedule(void)
65829 +{
65830 + if (unlikely(current->delayed_cred))
65831 + gr_delayed_cred_worker();
65832 +}
65833 +#else
65834 +static inline void gr_cred_schedule(void)
65835 +{
65836 +}
65837 +#endif
65838 +
65839 /*
65840 * __schedule() is the main scheduler function.
65841 */
65842 @@ -4408,6 +4421,8 @@ need_resched:
65843
65844 schedule_debug(prev);
65845
65846 + gr_cred_schedule();
65847 +
65848 if (sched_feat(HRTICK))
65849 hrtick_clear(rq);
65850
65851 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
65852 /* convert nice value [19,-20] to rlimit style value [1,40] */
65853 int nice_rlim = 20 - nice;
65854
65855 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65856 +
65857 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65858 capable(CAP_SYS_NICE));
65859 }
65860 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65861 if (nice > 19)
65862 nice = 19;
65863
65864 - if (increment < 0 && !can_nice(current, nice))
65865 + if (increment < 0 && (!can_nice(current, nice) ||
65866 + gr_handle_chroot_nice()))
65867 return -EPERM;
65868
65869 retval = security_task_setnice(current, nice);
65870 @@ -5288,6 +5306,7 @@ recheck:
65871 unsigned long rlim_rtprio =
65872 task_rlimit(p, RLIMIT_RTPRIO);
65873
65874 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65875 /* can't set/change the rt policy */
65876 if (policy != p->policy && !rlim_rtprio)
65877 return -EPERM;
65878 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
65879 index 429242f..d7cca82 100644
65880 --- a/kernel/sched_autogroup.c
65881 +++ b/kernel/sched_autogroup.c
65882 @@ -7,7 +7,7 @@
65883
65884 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65885 static struct autogroup autogroup_default;
65886 -static atomic_t autogroup_seq_nr;
65887 +static atomic_unchecked_t autogroup_seq_nr;
65888
65889 static void __init autogroup_init(struct task_struct *init_task)
65890 {
65891 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
65892
65893 kref_init(&ag->kref);
65894 init_rwsem(&ag->lock);
65895 - ag->id = atomic_inc_return(&autogroup_seq_nr);
65896 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65897 ag->tg = tg;
65898 #ifdef CONFIG_RT_GROUP_SCHED
65899 /*
65900 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
65901 index 8a39fa3..34f3dbc 100644
65902 --- a/kernel/sched_fair.c
65903 +++ b/kernel/sched_fair.c
65904 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
65905 * run_rebalance_domains is triggered when needed from the scheduler tick.
65906 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65907 */
65908 -static void run_rebalance_domains(struct softirq_action *h)
65909 +static void run_rebalance_domains(void)
65910 {
65911 int this_cpu = smp_processor_id();
65912 struct rq *this_rq = cpu_rq(this_cpu);
65913 diff --git a/kernel/signal.c b/kernel/signal.c
65914 index 2065515..aed2987 100644
65915 --- a/kernel/signal.c
65916 +++ b/kernel/signal.c
65917 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
65918
65919 int print_fatal_signals __read_mostly;
65920
65921 -static void __user *sig_handler(struct task_struct *t, int sig)
65922 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65923 {
65924 return t->sighand->action[sig - 1].sa.sa_handler;
65925 }
65926
65927 -static int sig_handler_ignored(void __user *handler, int sig)
65928 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65929 {
65930 /* Is it explicitly or implicitly ignored? */
65931 return handler == SIG_IGN ||
65932 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
65933 static int sig_task_ignored(struct task_struct *t, int sig,
65934 int from_ancestor_ns)
65935 {
65936 - void __user *handler;
65937 + __sighandler_t handler;
65938
65939 handler = sig_handler(t, sig);
65940
65941 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
65942 atomic_inc(&user->sigpending);
65943 rcu_read_unlock();
65944
65945 + if (!override_rlimit)
65946 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65947 +
65948 if (override_rlimit ||
65949 atomic_read(&user->sigpending) <=
65950 task_rlimit(t, RLIMIT_SIGPENDING)) {
65951 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
65952
65953 int unhandled_signal(struct task_struct *tsk, int sig)
65954 {
65955 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65956 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65957 if (is_global_init(tsk))
65958 return 1;
65959 if (handler != SIG_IGN && handler != SIG_DFL)
65960 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
65961 }
65962 }
65963
65964 + /* allow glibc communication via tgkill to other threads in our
65965 + thread group */
65966 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65967 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65968 + && gr_handle_signal(t, sig))
65969 + return -EPERM;
65970 +
65971 return security_task_kill(t, info, sig, 0);
65972 }
65973
65974 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
65975 return send_signal(sig, info, p, 1);
65976 }
65977
65978 -static int
65979 +int
65980 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65981 {
65982 return send_signal(sig, info, t, 0);
65983 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65984 unsigned long int flags;
65985 int ret, blocked, ignored;
65986 struct k_sigaction *action;
65987 + int is_unhandled = 0;
65988
65989 spin_lock_irqsave(&t->sighand->siglock, flags);
65990 action = &t->sighand->action[sig-1];
65991 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65992 }
65993 if (action->sa.sa_handler == SIG_DFL)
65994 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65995 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65996 + is_unhandled = 1;
65997 ret = specific_send_sig_info(sig, info, t);
65998 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65999
66000 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66001 + normal operation */
66002 + if (is_unhandled) {
66003 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66004 + gr_handle_crash(t, sig);
66005 + }
66006 +
66007 return ret;
66008 }
66009
66010 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66011 ret = check_kill_permission(sig, info, p);
66012 rcu_read_unlock();
66013
66014 - if (!ret && sig)
66015 + if (!ret && sig) {
66016 ret = do_send_sig_info(sig, info, p, true);
66017 + if (!ret)
66018 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66019 + }
66020
66021 return ret;
66022 }
66023 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66024 int error = -ESRCH;
66025
66026 rcu_read_lock();
66027 - p = find_task_by_vpid(pid);
66028 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66029 + /* allow glibc communication via tgkill to other threads in our
66030 + thread group */
66031 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66032 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66033 + p = find_task_by_vpid_unrestricted(pid);
66034 + else
66035 +#endif
66036 + p = find_task_by_vpid(pid);
66037 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66038 error = check_kill_permission(sig, info, p);
66039 /*
66040 diff --git a/kernel/smp.c b/kernel/smp.c
66041 index db197d6..17aef0b 100644
66042 --- a/kernel/smp.c
66043 +++ b/kernel/smp.c
66044 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66045 }
66046 EXPORT_SYMBOL(smp_call_function);
66047
66048 -void ipi_call_lock(void)
66049 +void ipi_call_lock(void) __acquires(call_function.lock)
66050 {
66051 raw_spin_lock(&call_function.lock);
66052 }
66053
66054 -void ipi_call_unlock(void)
66055 +void ipi_call_unlock(void) __releases(call_function.lock)
66056 {
66057 raw_spin_unlock(&call_function.lock);
66058 }
66059
66060 -void ipi_call_lock_irq(void)
66061 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66062 {
66063 raw_spin_lock_irq(&call_function.lock);
66064 }
66065
66066 -void ipi_call_unlock_irq(void)
66067 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66068 {
66069 raw_spin_unlock_irq(&call_function.lock);
66070 }
66071 diff --git a/kernel/softirq.c b/kernel/softirq.c
66072 index 2c71d91..1021f81 100644
66073 --- a/kernel/softirq.c
66074 +++ b/kernel/softirq.c
66075 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66076
66077 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66078
66079 -char *softirq_to_name[NR_SOFTIRQS] = {
66080 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66081 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66082 "TASKLET", "SCHED", "HRTIMER", "RCU"
66083 };
66084 @@ -235,7 +235,7 @@ restart:
66085 kstat_incr_softirqs_this_cpu(vec_nr);
66086
66087 trace_softirq_entry(vec_nr);
66088 - h->action(h);
66089 + h->action();
66090 trace_softirq_exit(vec_nr);
66091 if (unlikely(prev_count != preempt_count())) {
66092 printk(KERN_ERR "huh, entered softirq %u %s %p"
66093 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66094 local_irq_restore(flags);
66095 }
66096
66097 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66098 +void open_softirq(int nr, void (*action)(void))
66099 {
66100 - softirq_vec[nr].action = action;
66101 + pax_open_kernel();
66102 + *(void **)&softirq_vec[nr].action = action;
66103 + pax_close_kernel();
66104 }
66105
66106 /*
66107 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66108
66109 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66110
66111 -static void tasklet_action(struct softirq_action *a)
66112 +static void tasklet_action(void)
66113 {
66114 struct tasklet_struct *list;
66115
66116 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66117 }
66118 }
66119
66120 -static void tasklet_hi_action(struct softirq_action *a)
66121 +static void tasklet_hi_action(void)
66122 {
66123 struct tasklet_struct *list;
66124
66125 diff --git a/kernel/sys.c b/kernel/sys.c
66126 index 481611f..4665125 100644
66127 --- a/kernel/sys.c
66128 +++ b/kernel/sys.c
66129 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66130 error = -EACCES;
66131 goto out;
66132 }
66133 +
66134 + if (gr_handle_chroot_setpriority(p, niceval)) {
66135 + error = -EACCES;
66136 + goto out;
66137 + }
66138 +
66139 no_nice = security_task_setnice(p, niceval);
66140 if (no_nice) {
66141 error = no_nice;
66142 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66143 goto error;
66144 }
66145
66146 + if (gr_check_group_change(new->gid, new->egid, -1))
66147 + goto error;
66148 +
66149 if (rgid != (gid_t) -1 ||
66150 (egid != (gid_t) -1 && egid != old->gid))
66151 new->sgid = new->egid;
66152 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66153 old = current_cred();
66154
66155 retval = -EPERM;
66156 +
66157 + if (gr_check_group_change(gid, gid, gid))
66158 + goto error;
66159 +
66160 if (nsown_capable(CAP_SETGID))
66161 new->gid = new->egid = new->sgid = new->fsgid = gid;
66162 else if (gid == old->gid || gid == old->sgid)
66163 @@ -618,7 +631,7 @@ error:
66164 /*
66165 * change the user struct in a credentials set to match the new UID
66166 */
66167 -static int set_user(struct cred *new)
66168 +int set_user(struct cred *new)
66169 {
66170 struct user_struct *new_user;
66171
66172 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66173 goto error;
66174 }
66175
66176 + if (gr_check_user_change(new->uid, new->euid, -1))
66177 + goto error;
66178 +
66179 if (new->uid != old->uid) {
66180 retval = set_user(new);
66181 if (retval < 0)
66182 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66183 old = current_cred();
66184
66185 retval = -EPERM;
66186 +
66187 + if (gr_check_crash_uid(uid))
66188 + goto error;
66189 + if (gr_check_user_change(uid, uid, uid))
66190 + goto error;
66191 +
66192 if (nsown_capable(CAP_SETUID)) {
66193 new->suid = new->uid = uid;
66194 if (uid != old->uid) {
66195 @@ -775,9 +797,18 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66196
66197 retval = -EPERM;
66198 if (!nsown_capable(CAP_SETUID)) {
66199 - if (ruid != (uid_t) -1 && ruid != old->uid &&
66200 - ruid != old->euid && ruid != old->suid)
66201 - goto error;
66202 + // if RBAC is enabled, require CAP_SETUID to change
66203 + // uid to euid (from a suid binary, for instance)
66204 + // this is a hardening of normal permissions, not
66205 + // weakening
66206 + if (gr_acl_is_enabled()) {
66207 + if (ruid != (uid_t) -1 && ruid != old->uid)
66208 + goto error;
66209 + } else {
66210 + if (ruid != (uid_t) -1 && ruid != old->uid &&
66211 + ruid != old->euid && ruid != old->suid)
66212 + goto error;
66213 + }
66214 if (euid != (uid_t) -1 && euid != old->uid &&
66215 euid != old->euid && euid != old->suid)
66216 goto error;
66217 @@ -786,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66218 goto error;
66219 }
66220
66221 + if (gr_check_user_change(ruid, euid, -1))
66222 + goto error;
66223 +
66224 if (ruid != (uid_t) -1) {
66225 new->uid = ruid;
66226 if (ruid != old->uid) {
66227 @@ -839,9 +873,18 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66228
66229 retval = -EPERM;
66230 if (!nsown_capable(CAP_SETGID)) {
66231 - if (rgid != (gid_t) -1 && rgid != old->gid &&
66232 - rgid != old->egid && rgid != old->sgid)
66233 - goto error;
66234 + // if RBAC is enabled, require CAP_SETGID to change
66235 + // gid to egid (from a sgid binary, for instance)
66236 + // this is a hardening of normal permissions, not
66237 + // weakening
66238 + if (gr_acl_is_enabled()) {
66239 + if (rgid != (gid_t) -1 && rgid != old->gid)
66240 + goto error;
66241 + } else {
66242 + if (rgid != (gid_t) -1 && rgid != old->gid &&
66243 + rgid != old->egid && rgid != old->sgid)
66244 + goto error;
66245 + }
66246 if (egid != (gid_t) -1 && egid != old->gid &&
66247 egid != old->egid && egid != old->sgid)
66248 goto error;
66249 @@ -850,6 +893,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66250 goto error;
66251 }
66252
66253 + if (gr_check_group_change(rgid, egid, -1))
66254 + goto error;
66255 +
66256 if (rgid != (gid_t) -1)
66257 new->gid = rgid;
66258 if (egid != (gid_t) -1)
66259 @@ -896,6 +942,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66260 old = current_cred();
66261 old_fsuid = old->fsuid;
66262
66263 + if (gr_check_user_change(-1, -1, uid))
66264 + goto error;
66265 +
66266 if (uid == old->uid || uid == old->euid ||
66267 uid == old->suid || uid == old->fsuid ||
66268 nsown_capable(CAP_SETUID)) {
66269 @@ -906,6 +955,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66270 }
66271 }
66272
66273 +error:
66274 abort_creds(new);
66275 return old_fsuid;
66276
66277 @@ -932,12 +982,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66278 if (gid == old->gid || gid == old->egid ||
66279 gid == old->sgid || gid == old->fsgid ||
66280 nsown_capable(CAP_SETGID)) {
66281 + if (gr_check_group_change(-1, -1, gid))
66282 + goto error;
66283 +
66284 if (gid != old_fsgid) {
66285 new->fsgid = gid;
66286 goto change_okay;
66287 }
66288 }
66289
66290 +error:
66291 abort_creds(new);
66292 return old_fsgid;
66293
66294 @@ -1189,7 +1243,10 @@ static int override_release(char __user *release, int len)
66295 }
66296 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66297 snprintf(buf, len, "2.6.%u%s", v, rest);
66298 - ret = copy_to_user(release, buf, len);
66299 + if (len > sizeof(buf))
66300 + ret = -EFAULT;
66301 + else
66302 + ret = copy_to_user(release, buf, len);
66303 }
66304 return ret;
66305 }
66306 @@ -1243,19 +1300,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66307 return -EFAULT;
66308
66309 down_read(&uts_sem);
66310 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66311 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66312 __OLD_UTS_LEN);
66313 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66314 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66315 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66316 __OLD_UTS_LEN);
66317 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66318 - error |= __copy_to_user(&name->release, &utsname()->release,
66319 + error |= __copy_to_user(name->release, &utsname()->release,
66320 __OLD_UTS_LEN);
66321 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66322 - error |= __copy_to_user(&name->version, &utsname()->version,
66323 + error |= __copy_to_user(name->version, &utsname()->version,
66324 __OLD_UTS_LEN);
66325 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66326 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66327 + error |= __copy_to_user(name->machine, &utsname()->machine,
66328 __OLD_UTS_LEN);
66329 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66330 up_read(&uts_sem);
66331 @@ -1720,7 +1777,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66332 error = get_dumpable(me->mm);
66333 break;
66334 case PR_SET_DUMPABLE:
66335 - if (arg2 < 0 || arg2 > 1) {
66336 + if (arg2 > 1) {
66337 error = -EINVAL;
66338 break;
66339 }
66340 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66341 index ae27196..7506d69 100644
66342 --- a/kernel/sysctl.c
66343 +++ b/kernel/sysctl.c
66344 @@ -86,6 +86,13 @@
66345
66346
66347 #if defined(CONFIG_SYSCTL)
66348 +#include <linux/grsecurity.h>
66349 +#include <linux/grinternal.h>
66350 +
66351 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66352 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66353 + const int op);
66354 +extern int gr_handle_chroot_sysctl(const int op);
66355
66356 /* External variables not in a header file. */
66357 extern int sysctl_overcommit_memory;
66358 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66359 }
66360
66361 #endif
66362 +extern struct ctl_table grsecurity_table[];
66363
66364 static struct ctl_table root_table[];
66365 static struct ctl_table_root sysctl_table_root;
66366 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66367 int sysctl_legacy_va_layout;
66368 #endif
66369
66370 +#ifdef CONFIG_PAX_SOFTMODE
66371 +static ctl_table pax_table[] = {
66372 + {
66373 + .procname = "softmode",
66374 + .data = &pax_softmode,
66375 + .maxlen = sizeof(unsigned int),
66376 + .mode = 0600,
66377 + .proc_handler = &proc_dointvec,
66378 + },
66379 +
66380 + { }
66381 +};
66382 +#endif
66383 +
66384 /* The default sysctl tables: */
66385
66386 static struct ctl_table root_table[] = {
66387 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66388 #endif
66389
66390 static struct ctl_table kern_table[] = {
66391 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66392 + {
66393 + .procname = "grsecurity",
66394 + .mode = 0500,
66395 + .child = grsecurity_table,
66396 + },
66397 +#endif
66398 +
66399 +#ifdef CONFIG_PAX_SOFTMODE
66400 + {
66401 + .procname = "pax",
66402 + .mode = 0500,
66403 + .child = pax_table,
66404 + },
66405 +#endif
66406 +
66407 {
66408 .procname = "sched_child_runs_first",
66409 .data = &sysctl_sched_child_runs_first,
66410 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66411 .data = &modprobe_path,
66412 .maxlen = KMOD_PATH_LEN,
66413 .mode = 0644,
66414 - .proc_handler = proc_dostring,
66415 + .proc_handler = proc_dostring_modpriv,
66416 },
66417 {
66418 .procname = "modules_disabled",
66419 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66420 .extra1 = &zero,
66421 .extra2 = &one,
66422 },
66423 +#endif
66424 {
66425 .procname = "kptr_restrict",
66426 .data = &kptr_restrict,
66427 .maxlen = sizeof(int),
66428 .mode = 0644,
66429 .proc_handler = proc_dmesg_restrict,
66430 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66431 + .extra1 = &two,
66432 +#else
66433 .extra1 = &zero,
66434 +#endif
66435 .extra2 = &two,
66436 },
66437 -#endif
66438 {
66439 .procname = "ngroups_max",
66440 .data = &ngroups_max,
66441 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66442 .proc_handler = proc_dointvec_minmax,
66443 .extra1 = &zero,
66444 },
66445 + {
66446 + .procname = "heap_stack_gap",
66447 + .data = &sysctl_heap_stack_gap,
66448 + .maxlen = sizeof(sysctl_heap_stack_gap),
66449 + .mode = 0644,
66450 + .proc_handler = proc_doulongvec_minmax,
66451 + },
66452 #else
66453 {
66454 .procname = "nr_trim_pages",
66455 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66456 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66457 {
66458 int mode;
66459 + int error;
66460 +
66461 + if (table->parent != NULL && table->parent->procname != NULL &&
66462 + table->procname != NULL &&
66463 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66464 + return -EACCES;
66465 + if (gr_handle_chroot_sysctl(op))
66466 + return -EACCES;
66467 + error = gr_handle_sysctl(table, op);
66468 + if (error)
66469 + return error;
66470
66471 if (root->permissions)
66472 mode = root->permissions(root, current->nsproxy, table);
66473 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66474 buffer, lenp, ppos);
66475 }
66476
66477 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66478 + void __user *buffer, size_t *lenp, loff_t *ppos)
66479 +{
66480 + if (write && !capable(CAP_SYS_MODULE))
66481 + return -EPERM;
66482 +
66483 + return _proc_do_string(table->data, table->maxlen, write,
66484 + buffer, lenp, ppos);
66485 +}
66486 +
66487 static size_t proc_skip_spaces(char **buf)
66488 {
66489 size_t ret;
66490 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66491 len = strlen(tmp);
66492 if (len > *size)
66493 len = *size;
66494 + if (len > sizeof(tmp))
66495 + len = sizeof(tmp);
66496 if (copy_to_user(*buf, tmp, len))
66497 return -EFAULT;
66498 *size -= len;
66499 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66500 *i = val;
66501 } else {
66502 val = convdiv * (*i) / convmul;
66503 - if (!first)
66504 + if (!first) {
66505 err = proc_put_char(&buffer, &left, '\t');
66506 + if (err)
66507 + break;
66508 + }
66509 err = proc_put_long(&buffer, &left, val, false);
66510 if (err)
66511 break;
66512 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66513 return -ENOSYS;
66514 }
66515
66516 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66517 + void __user *buffer, size_t *lenp, loff_t *ppos)
66518 +{
66519 + return -ENOSYS;
66520 +}
66521 +
66522 int proc_dointvec(struct ctl_table *table, int write,
66523 void __user *buffer, size_t *lenp, loff_t *ppos)
66524 {
66525 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66526 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66527 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66528 EXPORT_SYMBOL(proc_dostring);
66529 +EXPORT_SYMBOL(proc_dostring_modpriv);
66530 EXPORT_SYMBOL(proc_doulongvec_minmax);
66531 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66532 EXPORT_SYMBOL(register_sysctl_table);
66533 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66534 index a650694..aaeeb20 100644
66535 --- a/kernel/sysctl_binary.c
66536 +++ b/kernel/sysctl_binary.c
66537 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66538 int i;
66539
66540 set_fs(KERNEL_DS);
66541 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66542 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66543 set_fs(old_fs);
66544 if (result < 0)
66545 goto out_kfree;
66546 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66547 }
66548
66549 set_fs(KERNEL_DS);
66550 - result = vfs_write(file, buffer, str - buffer, &pos);
66551 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66552 set_fs(old_fs);
66553 if (result < 0)
66554 goto out_kfree;
66555 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66556 int i;
66557
66558 set_fs(KERNEL_DS);
66559 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66560 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66561 set_fs(old_fs);
66562 if (result < 0)
66563 goto out_kfree;
66564 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66565 }
66566
66567 set_fs(KERNEL_DS);
66568 - result = vfs_write(file, buffer, str - buffer, &pos);
66569 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66570 set_fs(old_fs);
66571 if (result < 0)
66572 goto out_kfree;
66573 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66574 int i;
66575
66576 set_fs(KERNEL_DS);
66577 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66578 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66579 set_fs(old_fs);
66580 if (result < 0)
66581 goto out;
66582 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66583 __le16 dnaddr;
66584
66585 set_fs(KERNEL_DS);
66586 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66587 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66588 set_fs(old_fs);
66589 if (result < 0)
66590 goto out;
66591 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66592 le16_to_cpu(dnaddr) & 0x3ff);
66593
66594 set_fs(KERNEL_DS);
66595 - result = vfs_write(file, buf, len, &pos);
66596 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66597 set_fs(old_fs);
66598 if (result < 0)
66599 goto out;
66600 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66601 index 362da65..ab8ef8c 100644
66602 --- a/kernel/sysctl_check.c
66603 +++ b/kernel/sysctl_check.c
66604 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66605 set_fail(&fail, table, "Directory with extra2");
66606 } else {
66607 if ((table->proc_handler == proc_dostring) ||
66608 + (table->proc_handler == proc_dostring_modpriv) ||
66609 (table->proc_handler == proc_dointvec) ||
66610 (table->proc_handler == proc_dointvec_minmax) ||
66611 (table->proc_handler == proc_dointvec_jiffies) ||
66612 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66613 index e660464..c8b9e67 100644
66614 --- a/kernel/taskstats.c
66615 +++ b/kernel/taskstats.c
66616 @@ -27,9 +27,12 @@
66617 #include <linux/cgroup.h>
66618 #include <linux/fs.h>
66619 #include <linux/file.h>
66620 +#include <linux/grsecurity.h>
66621 #include <net/genetlink.h>
66622 #include <linux/atomic.h>
66623
66624 +extern int gr_is_taskstats_denied(int pid);
66625 +
66626 /*
66627 * Maximum length of a cpumask that can be specified in
66628 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66629 @@ -556,6 +559,9 @@ err:
66630
66631 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66632 {
66633 + if (gr_is_taskstats_denied(current->pid))
66634 + return -EACCES;
66635 +
66636 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66637 return cmd_attr_register_cpumask(info);
66638 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66639 diff --git a/kernel/time.c b/kernel/time.c
66640 index 73e416d..cfc6f69 100644
66641 --- a/kernel/time.c
66642 +++ b/kernel/time.c
66643 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66644 return error;
66645
66646 if (tz) {
66647 + /* we log in do_settimeofday called below, so don't log twice
66648 + */
66649 + if (!tv)
66650 + gr_log_timechange();
66651 +
66652 /* SMP safe, global irq locking makes it work. */
66653 sys_tz = *tz;
66654 update_vsyscall_tz();
66655 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66656 index 8a46f5d..bbe6f9c 100644
66657 --- a/kernel/time/alarmtimer.c
66658 +++ b/kernel/time/alarmtimer.c
66659 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66660 struct platform_device *pdev;
66661 int error = 0;
66662 int i;
66663 - struct k_clock alarm_clock = {
66664 + static struct k_clock alarm_clock = {
66665 .clock_getres = alarm_clock_getres,
66666 .clock_get = alarm_clock_get,
66667 .timer_create = alarm_timer_create,
66668 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66669 index fd4a7b1..fae5c2a 100644
66670 --- a/kernel/time/tick-broadcast.c
66671 +++ b/kernel/time/tick-broadcast.c
66672 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66673 * then clear the broadcast bit.
66674 */
66675 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66676 - int cpu = smp_processor_id();
66677 + cpu = smp_processor_id();
66678
66679 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66680 tick_broadcast_clear_oneshot(cpu);
66681 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66682 index 2378413..be455fd 100644
66683 --- a/kernel/time/timekeeping.c
66684 +++ b/kernel/time/timekeeping.c
66685 @@ -14,6 +14,7 @@
66686 #include <linux/init.h>
66687 #include <linux/mm.h>
66688 #include <linux/sched.h>
66689 +#include <linux/grsecurity.h>
66690 #include <linux/syscore_ops.h>
66691 #include <linux/clocksource.h>
66692 #include <linux/jiffies.h>
66693 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66694 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66695 return -EINVAL;
66696
66697 + gr_log_timechange();
66698 +
66699 write_seqlock_irqsave(&xtime_lock, flags);
66700
66701 timekeeping_forward_now();
66702 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66703 index 3258455..f35227d 100644
66704 --- a/kernel/time/timer_list.c
66705 +++ b/kernel/time/timer_list.c
66706 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66707
66708 static void print_name_offset(struct seq_file *m, void *sym)
66709 {
66710 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66711 + SEQ_printf(m, "<%p>", NULL);
66712 +#else
66713 char symname[KSYM_NAME_LEN];
66714
66715 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66716 SEQ_printf(m, "<%pK>", sym);
66717 else
66718 SEQ_printf(m, "%s", symname);
66719 +#endif
66720 }
66721
66722 static void
66723 @@ -112,7 +116,11 @@ next_one:
66724 static void
66725 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66726 {
66727 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66728 + SEQ_printf(m, " .base: %p\n", NULL);
66729 +#else
66730 SEQ_printf(m, " .base: %pK\n", base);
66731 +#endif
66732 SEQ_printf(m, " .index: %d\n",
66733 base->index);
66734 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66735 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66736 {
66737 struct proc_dir_entry *pe;
66738
66739 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66740 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66741 +#else
66742 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66743 +#endif
66744 if (!pe)
66745 return -ENOMEM;
66746 return 0;
66747 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66748 index 0b537f2..9e71eca 100644
66749 --- a/kernel/time/timer_stats.c
66750 +++ b/kernel/time/timer_stats.c
66751 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66752 static unsigned long nr_entries;
66753 static struct entry entries[MAX_ENTRIES];
66754
66755 -static atomic_t overflow_count;
66756 +static atomic_unchecked_t overflow_count;
66757
66758 /*
66759 * The entries are in a hash-table, for fast lookup:
66760 @@ -140,7 +140,7 @@ static void reset_entries(void)
66761 nr_entries = 0;
66762 memset(entries, 0, sizeof(entries));
66763 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66764 - atomic_set(&overflow_count, 0);
66765 + atomic_set_unchecked(&overflow_count, 0);
66766 }
66767
66768 static struct entry *alloc_entry(void)
66769 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66770 if (likely(entry))
66771 entry->count++;
66772 else
66773 - atomic_inc(&overflow_count);
66774 + atomic_inc_unchecked(&overflow_count);
66775
66776 out_unlock:
66777 raw_spin_unlock_irqrestore(lock, flags);
66778 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66779
66780 static void print_name_offset(struct seq_file *m, unsigned long addr)
66781 {
66782 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66783 + seq_printf(m, "<%p>", NULL);
66784 +#else
66785 char symname[KSYM_NAME_LEN];
66786
66787 if (lookup_symbol_name(addr, symname) < 0)
66788 seq_printf(m, "<%p>", (void *)addr);
66789 else
66790 seq_printf(m, "%s", symname);
66791 +#endif
66792 }
66793
66794 static int tstats_show(struct seq_file *m, void *v)
66795 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66796
66797 seq_puts(m, "Timer Stats Version: v0.2\n");
66798 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66799 - if (atomic_read(&overflow_count))
66800 + if (atomic_read_unchecked(&overflow_count))
66801 seq_printf(m, "Overflow: %d entries\n",
66802 - atomic_read(&overflow_count));
66803 + atomic_read_unchecked(&overflow_count));
66804
66805 for (i = 0; i < nr_entries; i++) {
66806 entry = entries + i;
66807 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66808 {
66809 struct proc_dir_entry *pe;
66810
66811 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66812 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66813 +#else
66814 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66815 +#endif
66816 if (!pe)
66817 return -ENOMEM;
66818 return 0;
66819 diff --git a/kernel/timer.c b/kernel/timer.c
66820 index 9c3c62b..441690e 100644
66821 --- a/kernel/timer.c
66822 +++ b/kernel/timer.c
66823 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66824 /*
66825 * This function runs timers and the timer-tq in bottom half context.
66826 */
66827 -static void run_timer_softirq(struct softirq_action *h)
66828 +static void run_timer_softirq(void)
66829 {
66830 struct tvec_base *base = __this_cpu_read(tvec_bases);
66831
66832 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66833 index 16fc34a..efd8bb8 100644
66834 --- a/kernel/trace/blktrace.c
66835 +++ b/kernel/trace/blktrace.c
66836 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66837 struct blk_trace *bt = filp->private_data;
66838 char buf[16];
66839
66840 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66841 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66842
66843 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66844 }
66845 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66846 return 1;
66847
66848 bt = buf->chan->private_data;
66849 - atomic_inc(&bt->dropped);
66850 + atomic_inc_unchecked(&bt->dropped);
66851 return 0;
66852 }
66853
66854 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66855
66856 bt->dir = dir;
66857 bt->dev = dev;
66858 - atomic_set(&bt->dropped, 0);
66859 + atomic_set_unchecked(&bt->dropped, 0);
66860
66861 ret = -EIO;
66862 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66863 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66864 index 25b4f4d..6f4772d 100644
66865 --- a/kernel/trace/ftrace.c
66866 +++ b/kernel/trace/ftrace.c
66867 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66868 if (unlikely(ftrace_disabled))
66869 return 0;
66870
66871 + ret = ftrace_arch_code_modify_prepare();
66872 + FTRACE_WARN_ON(ret);
66873 + if (ret)
66874 + return 0;
66875 +
66876 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66877 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66878 if (ret) {
66879 ftrace_bug(ret, ip);
66880 - return 0;
66881 }
66882 - return 1;
66883 + return ret ? 0 : 1;
66884 }
66885
66886 /*
66887 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66888
66889 int
66890 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66891 - void *data)
66892 + void *data)
66893 {
66894 struct ftrace_func_probe *entry;
66895 struct ftrace_page *pg;
66896 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
66897 index f2bd275..adaf3a2 100644
66898 --- a/kernel/trace/trace.c
66899 +++ b/kernel/trace/trace.c
66900 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
66901 };
66902 #endif
66903
66904 -static struct dentry *d_tracer;
66905 -
66906 struct dentry *tracing_init_dentry(void)
66907 {
66908 + static struct dentry *d_tracer;
66909 static int once;
66910
66911 if (d_tracer)
66912 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
66913 return d_tracer;
66914 }
66915
66916 -static struct dentry *d_percpu;
66917 -
66918 struct dentry *tracing_dentry_percpu(void)
66919 {
66920 + static struct dentry *d_percpu;
66921 static int once;
66922 struct dentry *d_tracer;
66923
66924 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
66925 index c212a7f..7b02394 100644
66926 --- a/kernel/trace/trace_events.c
66927 +++ b/kernel/trace/trace_events.c
66928 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
66929 struct ftrace_module_file_ops {
66930 struct list_head list;
66931 struct module *mod;
66932 - struct file_operations id;
66933 - struct file_operations enable;
66934 - struct file_operations format;
66935 - struct file_operations filter;
66936 };
66937
66938 static struct ftrace_module_file_ops *
66939 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
66940
66941 file_ops->mod = mod;
66942
66943 - file_ops->id = ftrace_event_id_fops;
66944 - file_ops->id.owner = mod;
66945 -
66946 - file_ops->enable = ftrace_enable_fops;
66947 - file_ops->enable.owner = mod;
66948 -
66949 - file_ops->filter = ftrace_event_filter_fops;
66950 - file_ops->filter.owner = mod;
66951 -
66952 - file_ops->format = ftrace_event_format_fops;
66953 - file_ops->format.owner = mod;
66954 + pax_open_kernel();
66955 + *(void **)&mod->trace_id.owner = mod;
66956 + *(void **)&mod->trace_enable.owner = mod;
66957 + *(void **)&mod->trace_filter.owner = mod;
66958 + *(void **)&mod->trace_format.owner = mod;
66959 + pax_close_kernel();
66960
66961 list_add(&file_ops->list, &ftrace_module_file_list);
66962
66963 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
66964
66965 for_each_event(call, start, end) {
66966 __trace_add_event_call(*call, mod,
66967 - &file_ops->id, &file_ops->enable,
66968 - &file_ops->filter, &file_ops->format);
66969 + &mod->trace_id, &mod->trace_enable,
66970 + &mod->trace_filter, &mod->trace_format);
66971 }
66972 }
66973
66974 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
66975 index 00d527c..7c5b1a3 100644
66976 --- a/kernel/trace/trace_kprobe.c
66977 +++ b/kernel/trace/trace_kprobe.c
66978 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
66979 long ret;
66980 int maxlen = get_rloc_len(*(u32 *)dest);
66981 u8 *dst = get_rloc_data(dest);
66982 - u8 *src = addr;
66983 + const u8 __user *src = (const u8 __force_user *)addr;
66984 mm_segment_t old_fs = get_fs();
66985 if (!maxlen)
66986 return;
66987 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
66988 pagefault_disable();
66989 do
66990 ret = __copy_from_user_inatomic(dst++, src++, 1);
66991 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66992 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66993 dst[-1] = '\0';
66994 pagefault_enable();
66995 set_fs(old_fs);
66996 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
66997 ((u8 *)get_rloc_data(dest))[0] = '\0';
66998 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66999 } else
67000 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67001 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67002 get_rloc_offs(*(u32 *)dest));
67003 }
67004 /* Return the length of string -- including null terminal byte */
67005 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67006 set_fs(KERNEL_DS);
67007 pagefault_disable();
67008 do {
67009 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67010 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67011 len++;
67012 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67013 pagefault_enable();
67014 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67015 index fd3c8aa..5f324a6 100644
67016 --- a/kernel/trace/trace_mmiotrace.c
67017 +++ b/kernel/trace/trace_mmiotrace.c
67018 @@ -24,7 +24,7 @@ struct header_iter {
67019 static struct trace_array *mmio_trace_array;
67020 static bool overrun_detected;
67021 static unsigned long prev_overruns;
67022 -static atomic_t dropped_count;
67023 +static atomic_unchecked_t dropped_count;
67024
67025 static void mmio_reset_data(struct trace_array *tr)
67026 {
67027 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67028
67029 static unsigned long count_overruns(struct trace_iterator *iter)
67030 {
67031 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67032 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67033 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67034
67035 if (over > prev_overruns)
67036 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67037 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67038 sizeof(*entry), 0, pc);
67039 if (!event) {
67040 - atomic_inc(&dropped_count);
67041 + atomic_inc_unchecked(&dropped_count);
67042 return;
67043 }
67044 entry = ring_buffer_event_data(event);
67045 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67046 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67047 sizeof(*entry), 0, pc);
67048 if (!event) {
67049 - atomic_inc(&dropped_count);
67050 + atomic_inc_unchecked(&dropped_count);
67051 return;
67052 }
67053 entry = ring_buffer_event_data(event);
67054 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67055 index 5199930..26c73a0 100644
67056 --- a/kernel/trace/trace_output.c
67057 +++ b/kernel/trace/trace_output.c
67058 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67059
67060 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67061 if (!IS_ERR(p)) {
67062 - p = mangle_path(s->buffer + s->len, p, "\n");
67063 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67064 if (p) {
67065 s->len = p - s->buffer;
67066 return 1;
67067 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67068 index 77575b3..6e623d1 100644
67069 --- a/kernel/trace/trace_stack.c
67070 +++ b/kernel/trace/trace_stack.c
67071 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67072 return;
67073
67074 /* we do not handle interrupt stacks yet */
67075 - if (!object_is_on_stack(&this_size))
67076 + if (!object_starts_on_stack(&this_size))
67077 return;
67078
67079 local_irq_save(flags);
67080 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67081 index 209b379..7f76423 100644
67082 --- a/kernel/trace/trace_workqueue.c
67083 +++ b/kernel/trace/trace_workqueue.c
67084 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67085 int cpu;
67086 pid_t pid;
67087 /* Can be inserted from interrupt or user context, need to be atomic */
67088 - atomic_t inserted;
67089 + atomic_unchecked_t inserted;
67090 /*
67091 * Don't need to be atomic, works are serialized in a single workqueue thread
67092 * on a single CPU.
67093 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67094 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67095 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67096 if (node->pid == wq_thread->pid) {
67097 - atomic_inc(&node->inserted);
67098 + atomic_inc_unchecked(&node->inserted);
67099 goto found;
67100 }
67101 }
67102 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67103 tsk = get_pid_task(pid, PIDTYPE_PID);
67104 if (tsk) {
67105 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67106 - atomic_read(&cws->inserted), cws->executed,
67107 + atomic_read_unchecked(&cws->inserted), cws->executed,
67108 tsk->comm);
67109 put_task_struct(tsk);
67110 }
67111 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67112 index 82928f5..92da771 100644
67113 --- a/lib/Kconfig.debug
67114 +++ b/lib/Kconfig.debug
67115 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67116 depends on DEBUG_KERNEL
67117 depends on STACKTRACE_SUPPORT
67118 depends on PROC_FS
67119 + depends on !GRKERNSEC_HIDESYM
67120 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67121 select KALLSYMS
67122 select KALLSYMS_ALL
67123 diff --git a/lib/bitmap.c b/lib/bitmap.c
67124 index 0d4a127..33a06c7 100644
67125 --- a/lib/bitmap.c
67126 +++ b/lib/bitmap.c
67127 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67128 {
67129 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67130 u32 chunk;
67131 - const char __user __force *ubuf = (const char __user __force *)buf;
67132 + const char __user *ubuf = (const char __force_user *)buf;
67133
67134 bitmap_zero(maskp, nmaskbits);
67135
67136 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67137 {
67138 if (!access_ok(VERIFY_READ, ubuf, ulen))
67139 return -EFAULT;
67140 - return __bitmap_parse((const char __force *)ubuf,
67141 + return __bitmap_parse((const char __force_kernel *)ubuf,
67142 ulen, 1, maskp, nmaskbits);
67143
67144 }
67145 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67146 {
67147 unsigned a, b;
67148 int c, old_c, totaldigits;
67149 - const char __user __force *ubuf = (const char __user __force *)buf;
67150 + const char __user *ubuf = (const char __force_user *)buf;
67151 int exp_digit, in_range;
67152
67153 totaldigits = c = 0;
67154 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67155 {
67156 if (!access_ok(VERIFY_READ, ubuf, ulen))
67157 return -EFAULT;
67158 - return __bitmap_parselist((const char __force *)ubuf,
67159 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67160 ulen, 1, maskp, nmaskbits);
67161 }
67162 EXPORT_SYMBOL(bitmap_parselist_user);
67163 diff --git a/lib/bug.c b/lib/bug.c
67164 index 1955209..cbbb2ad 100644
67165 --- a/lib/bug.c
67166 +++ b/lib/bug.c
67167 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67168 return BUG_TRAP_TYPE_NONE;
67169
67170 bug = find_bug(bugaddr);
67171 + if (!bug)
67172 + return BUG_TRAP_TYPE_NONE;
67173
67174 file = NULL;
67175 line = 0;
67176 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67177 index a78b7c6..2c73084 100644
67178 --- a/lib/debugobjects.c
67179 +++ b/lib/debugobjects.c
67180 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67181 if (limit > 4)
67182 return;
67183
67184 - is_on_stack = object_is_on_stack(addr);
67185 + is_on_stack = object_starts_on_stack(addr);
67186 if (is_on_stack == onstack)
67187 return;
67188
67189 diff --git a/lib/devres.c b/lib/devres.c
67190 index 7c0e953..f642b5c 100644
67191 --- a/lib/devres.c
67192 +++ b/lib/devres.c
67193 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67194 void devm_iounmap(struct device *dev, void __iomem *addr)
67195 {
67196 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67197 - (void *)addr));
67198 + (void __force *)addr));
67199 iounmap(addr);
67200 }
67201 EXPORT_SYMBOL(devm_iounmap);
67202 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67203 {
67204 ioport_unmap(addr);
67205 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67206 - devm_ioport_map_match, (void *)addr));
67207 + devm_ioport_map_match, (void __force *)addr));
67208 }
67209 EXPORT_SYMBOL(devm_ioport_unmap);
67210
67211 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67212 index fea790a..ebb0e82 100644
67213 --- a/lib/dma-debug.c
67214 +++ b/lib/dma-debug.c
67215 @@ -925,7 +925,7 @@ out:
67216
67217 static void check_for_stack(struct device *dev, void *addr)
67218 {
67219 - if (object_is_on_stack(addr))
67220 + if (object_starts_on_stack(addr))
67221 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67222 "stack [addr=%p]\n", addr);
67223 }
67224 diff --git a/lib/extable.c b/lib/extable.c
67225 index 4cac81e..63e9b8f 100644
67226 --- a/lib/extable.c
67227 +++ b/lib/extable.c
67228 @@ -13,6 +13,7 @@
67229 #include <linux/init.h>
67230 #include <linux/sort.h>
67231 #include <asm/uaccess.h>
67232 +#include <asm/pgtable.h>
67233
67234 #ifndef ARCH_HAS_SORT_EXTABLE
67235 /*
67236 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67237 void sort_extable(struct exception_table_entry *start,
67238 struct exception_table_entry *finish)
67239 {
67240 + pax_open_kernel();
67241 sort(start, finish - start, sizeof(struct exception_table_entry),
67242 cmp_ex, NULL);
67243 + pax_close_kernel();
67244 }
67245
67246 #ifdef CONFIG_MODULES
67247 diff --git a/lib/inflate.c b/lib/inflate.c
67248 index 013a761..c28f3fc 100644
67249 --- a/lib/inflate.c
67250 +++ b/lib/inflate.c
67251 @@ -269,7 +269,7 @@ static void free(void *where)
67252 malloc_ptr = free_mem_ptr;
67253 }
67254 #else
67255 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67256 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67257 #define free(a) kfree(a)
67258 #endif
67259
67260 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67261 index bd2bea9..6b3c95e 100644
67262 --- a/lib/is_single_threaded.c
67263 +++ b/lib/is_single_threaded.c
67264 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67265 struct task_struct *p, *t;
67266 bool ret;
67267
67268 + if (!mm)
67269 + return true;
67270 +
67271 if (atomic_read(&task->signal->live) != 1)
67272 return false;
67273
67274 diff --git a/lib/kref.c b/lib/kref.c
67275 index 3efb882..8492f4c 100644
67276 --- a/lib/kref.c
67277 +++ b/lib/kref.c
67278 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67279 */
67280 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67281 {
67282 - WARN_ON(release == NULL);
67283 + BUG_ON(release == NULL);
67284 WARN_ON(release == (void (*)(struct kref *))kfree);
67285
67286 if (atomic_dec_and_test(&kref->refcount)) {
67287 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67288 index d9df745..e73c2fe 100644
67289 --- a/lib/radix-tree.c
67290 +++ b/lib/radix-tree.c
67291 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67292 int nr;
67293 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67294 };
67295 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67296 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67297
67298 static inline void *ptr_to_indirect(void *ptr)
67299 {
67300 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67301 index 993599e..84dc70e 100644
67302 --- a/lib/vsprintf.c
67303 +++ b/lib/vsprintf.c
67304 @@ -16,6 +16,9 @@
67305 * - scnprintf and vscnprintf
67306 */
67307
67308 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67309 +#define __INCLUDED_BY_HIDESYM 1
67310 +#endif
67311 #include <stdarg.h>
67312 #include <linux/module.h>
67313 #include <linux/types.h>
67314 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67315 char sym[KSYM_SYMBOL_LEN];
67316 if (ext == 'B')
67317 sprint_backtrace(sym, value);
67318 - else if (ext != 'f' && ext != 's')
67319 + else if (ext != 'f' && ext != 's' && ext != 'a')
67320 sprint_symbol(sym, value);
67321 else
67322 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67323 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67324 return string(buf, end, uuid, spec);
67325 }
67326
67327 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67328 +int kptr_restrict __read_mostly = 2;
67329 +#else
67330 int kptr_restrict __read_mostly;
67331 +#endif
67332
67333 /*
67334 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67335 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67336 * - 'S' For symbolic direct pointers with offset
67337 * - 's' For symbolic direct pointers without offset
67338 * - 'B' For backtraced symbolic direct pointers with offset
67339 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67340 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67341 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67342 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67343 * - 'M' For a 6-byte MAC address, it prints the address in the
67344 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67345 {
67346 if (!ptr && *fmt != 'K') {
67347 /*
67348 - * Print (null) with the same width as a pointer so it makes
67349 + * Print (nil) with the same width as a pointer so it makes
67350 * tabular output look nice.
67351 */
67352 if (spec.field_width == -1)
67353 spec.field_width = 2 * sizeof(void *);
67354 - return string(buf, end, "(null)", spec);
67355 + return string(buf, end, "(nil)", spec);
67356 }
67357
67358 switch (*fmt) {
67359 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67360 /* Fallthrough */
67361 case 'S':
67362 case 's':
67363 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67364 + break;
67365 +#else
67366 + return symbol_string(buf, end, ptr, spec, *fmt);
67367 +#endif
67368 + case 'A':
67369 + case 'a':
67370 case 'B':
67371 return symbol_string(buf, end, ptr, spec, *fmt);
67372 case 'R':
67373 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67374 typeof(type) value; \
67375 if (sizeof(type) == 8) { \
67376 args = PTR_ALIGN(args, sizeof(u32)); \
67377 - *(u32 *)&value = *(u32 *)args; \
67378 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67379 + *(u32 *)&value = *(const u32 *)args; \
67380 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67381 } else { \
67382 args = PTR_ALIGN(args, sizeof(type)); \
67383 - value = *(typeof(type) *)args; \
67384 + value = *(const typeof(type) *)args; \
67385 } \
67386 args += sizeof(type); \
67387 value; \
67388 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67389 case FORMAT_TYPE_STR: {
67390 const char *str_arg = args;
67391 args += strlen(str_arg) + 1;
67392 - str = string(str, end, (char *)str_arg, spec);
67393 + str = string(str, end, str_arg, spec);
67394 break;
67395 }
67396
67397 diff --git a/localversion-grsec b/localversion-grsec
67398 new file mode 100644
67399 index 0000000..7cd6065
67400 --- /dev/null
67401 +++ b/localversion-grsec
67402 @@ -0,0 +1 @@
67403 +-grsec
67404 diff --git a/mm/Kconfig b/mm/Kconfig
67405 index 011b110..b492af2 100644
67406 --- a/mm/Kconfig
67407 +++ b/mm/Kconfig
67408 @@ -241,10 +241,10 @@ config KSM
67409 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67410
67411 config DEFAULT_MMAP_MIN_ADDR
67412 - int "Low address space to protect from user allocation"
67413 + int "Low address space to protect from user allocation"
67414 depends on MMU
67415 - default 4096
67416 - help
67417 + default 65536
67418 + help
67419 This is the portion of low virtual memory which should be protected
67420 from userspace allocation. Keeping a user from writing to low pages
67421 can help reduce the impact of kernel NULL pointer bugs.
67422 diff --git a/mm/filemap.c b/mm/filemap.c
67423 index 90286a4..f441caa 100644
67424 --- a/mm/filemap.c
67425 +++ b/mm/filemap.c
67426 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67427 struct address_space *mapping = file->f_mapping;
67428
67429 if (!mapping->a_ops->readpage)
67430 - return -ENOEXEC;
67431 + return -ENODEV;
67432 file_accessed(file);
67433 vma->vm_ops = &generic_file_vm_ops;
67434 vma->vm_flags |= VM_CAN_NONLINEAR;
67435 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67436 *pos = i_size_read(inode);
67437
67438 if (limit != RLIM_INFINITY) {
67439 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67440 if (*pos >= limit) {
67441 send_sig(SIGXFSZ, current, 0);
67442 return -EFBIG;
67443 diff --git a/mm/fremap.c b/mm/fremap.c
67444 index 9ed4fd4..c42648d 100644
67445 --- a/mm/fremap.c
67446 +++ b/mm/fremap.c
67447 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67448 retry:
67449 vma = find_vma(mm, start);
67450
67451 +#ifdef CONFIG_PAX_SEGMEXEC
67452 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67453 + goto out;
67454 +#endif
67455 +
67456 /*
67457 * Make sure the vma is shared, that it supports prefaulting,
67458 * and that the remapped range is valid and fully within
67459 diff --git a/mm/highmem.c b/mm/highmem.c
67460 index 57d82c6..e9e0552 100644
67461 --- a/mm/highmem.c
67462 +++ b/mm/highmem.c
67463 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67464 * So no dangers, even with speculative execution.
67465 */
67466 page = pte_page(pkmap_page_table[i]);
67467 + pax_open_kernel();
67468 pte_clear(&init_mm, (unsigned long)page_address(page),
67469 &pkmap_page_table[i]);
67470 -
67471 + pax_close_kernel();
67472 set_page_address(page, NULL);
67473 need_flush = 1;
67474 }
67475 @@ -186,9 +187,11 @@ start:
67476 }
67477 }
67478 vaddr = PKMAP_ADDR(last_pkmap_nr);
67479 +
67480 + pax_open_kernel();
67481 set_pte_at(&init_mm, vaddr,
67482 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67483 -
67484 + pax_close_kernel();
67485 pkmap_count[last_pkmap_nr] = 1;
67486 set_page_address(page, (void *)vaddr);
67487
67488 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67489 index 36b3d98..584cb54 100644
67490 --- a/mm/huge_memory.c
67491 +++ b/mm/huge_memory.c
67492 @@ -703,7 +703,7 @@ out:
67493 * run pte_offset_map on the pmd, if an huge pmd could
67494 * materialize from under us from a different thread.
67495 */
67496 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67497 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67498 return VM_FAULT_OOM;
67499 /* if an huge pmd materialized from under us just retry later */
67500 if (unlikely(pmd_trans_huge(*pmd)))
67501 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67502 index 2316840..b418671 100644
67503 --- a/mm/hugetlb.c
67504 +++ b/mm/hugetlb.c
67505 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67506 return 1;
67507 }
67508
67509 +#ifdef CONFIG_PAX_SEGMEXEC
67510 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67511 +{
67512 + struct mm_struct *mm = vma->vm_mm;
67513 + struct vm_area_struct *vma_m;
67514 + unsigned long address_m;
67515 + pte_t *ptep_m;
67516 +
67517 + vma_m = pax_find_mirror_vma(vma);
67518 + if (!vma_m)
67519 + return;
67520 +
67521 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67522 + address_m = address + SEGMEXEC_TASK_SIZE;
67523 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67524 + get_page(page_m);
67525 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67526 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67527 +}
67528 +#endif
67529 +
67530 /*
67531 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67532 */
67533 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67534 make_huge_pte(vma, new_page, 1));
67535 page_remove_rmap(old_page);
67536 hugepage_add_new_anon_rmap(new_page, vma, address);
67537 +
67538 +#ifdef CONFIG_PAX_SEGMEXEC
67539 + pax_mirror_huge_pte(vma, address, new_page);
67540 +#endif
67541 +
67542 /* Make the old page be freed below */
67543 new_page = old_page;
67544 mmu_notifier_invalidate_range_end(mm,
67545 @@ -2601,6 +2627,10 @@ retry:
67546 && (vma->vm_flags & VM_SHARED)));
67547 set_huge_pte_at(mm, address, ptep, new_pte);
67548
67549 +#ifdef CONFIG_PAX_SEGMEXEC
67550 + pax_mirror_huge_pte(vma, address, page);
67551 +#endif
67552 +
67553 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67554 /* Optimization, do the COW without a second fault */
67555 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67556 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67557 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67558 struct hstate *h = hstate_vma(vma);
67559
67560 +#ifdef CONFIG_PAX_SEGMEXEC
67561 + struct vm_area_struct *vma_m;
67562 +#endif
67563 +
67564 ptep = huge_pte_offset(mm, address);
67565 if (ptep) {
67566 entry = huge_ptep_get(ptep);
67567 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67568 VM_FAULT_SET_HINDEX(h - hstates);
67569 }
67570
67571 +#ifdef CONFIG_PAX_SEGMEXEC
67572 + vma_m = pax_find_mirror_vma(vma);
67573 + if (vma_m) {
67574 + unsigned long address_m;
67575 +
67576 + if (vma->vm_start > vma_m->vm_start) {
67577 + address_m = address;
67578 + address -= SEGMEXEC_TASK_SIZE;
67579 + vma = vma_m;
67580 + h = hstate_vma(vma);
67581 + } else
67582 + address_m = address + SEGMEXEC_TASK_SIZE;
67583 +
67584 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67585 + return VM_FAULT_OOM;
67586 + address_m &= HPAGE_MASK;
67587 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67588 + }
67589 +#endif
67590 +
67591 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67592 if (!ptep)
67593 return VM_FAULT_OOM;
67594 diff --git a/mm/internal.h b/mm/internal.h
67595 index 2189af4..f2ca332 100644
67596 --- a/mm/internal.h
67597 +++ b/mm/internal.h
67598 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67599 * in mm/page_alloc.c
67600 */
67601 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67602 +extern void free_compound_page(struct page *page);
67603 extern void prep_compound_page(struct page *page, unsigned long order);
67604 #ifdef CONFIG_MEMORY_FAILURE
67605 extern bool is_free_buddy_page(struct page *page);
67606 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67607 index f3b2a00..61da94d 100644
67608 --- a/mm/kmemleak.c
67609 +++ b/mm/kmemleak.c
67610 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67611
67612 for (i = 0; i < object->trace_len; i++) {
67613 void *ptr = (void *)object->trace[i];
67614 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67615 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67616 }
67617 }
67618
67619 diff --git a/mm/maccess.c b/mm/maccess.c
67620 index d53adf9..03a24bf 100644
67621 --- a/mm/maccess.c
67622 +++ b/mm/maccess.c
67623 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67624 set_fs(KERNEL_DS);
67625 pagefault_disable();
67626 ret = __copy_from_user_inatomic(dst,
67627 - (__force const void __user *)src, size);
67628 + (const void __force_user *)src, size);
67629 pagefault_enable();
67630 set_fs(old_fs);
67631
67632 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67633
67634 set_fs(KERNEL_DS);
67635 pagefault_disable();
67636 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67637 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67638 pagefault_enable();
67639 set_fs(old_fs);
67640
67641 diff --git a/mm/madvise.c b/mm/madvise.c
67642 index 74bf193..feb6fd3 100644
67643 --- a/mm/madvise.c
67644 +++ b/mm/madvise.c
67645 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67646 pgoff_t pgoff;
67647 unsigned long new_flags = vma->vm_flags;
67648
67649 +#ifdef CONFIG_PAX_SEGMEXEC
67650 + struct vm_area_struct *vma_m;
67651 +#endif
67652 +
67653 switch (behavior) {
67654 case MADV_NORMAL:
67655 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67656 @@ -110,6 +114,13 @@ success:
67657 /*
67658 * vm_flags is protected by the mmap_sem held in write mode.
67659 */
67660 +
67661 +#ifdef CONFIG_PAX_SEGMEXEC
67662 + vma_m = pax_find_mirror_vma(vma);
67663 + if (vma_m)
67664 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67665 +#endif
67666 +
67667 vma->vm_flags = new_flags;
67668
67669 out:
67670 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67671 struct vm_area_struct ** prev,
67672 unsigned long start, unsigned long end)
67673 {
67674 +
67675 +#ifdef CONFIG_PAX_SEGMEXEC
67676 + struct vm_area_struct *vma_m;
67677 +#endif
67678 +
67679 *prev = vma;
67680 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67681 return -EINVAL;
67682 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67683 zap_page_range(vma, start, end - start, &details);
67684 } else
67685 zap_page_range(vma, start, end - start, NULL);
67686 +
67687 +#ifdef CONFIG_PAX_SEGMEXEC
67688 + vma_m = pax_find_mirror_vma(vma);
67689 + if (vma_m) {
67690 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67691 + struct zap_details details = {
67692 + .nonlinear_vma = vma_m,
67693 + .last_index = ULONG_MAX,
67694 + };
67695 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67696 + } else
67697 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67698 + }
67699 +#endif
67700 +
67701 return 0;
67702 }
67703
67704 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67705 if (end < start)
67706 goto out;
67707
67708 +#ifdef CONFIG_PAX_SEGMEXEC
67709 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67710 + if (end > SEGMEXEC_TASK_SIZE)
67711 + goto out;
67712 + } else
67713 +#endif
67714 +
67715 + if (end > TASK_SIZE)
67716 + goto out;
67717 +
67718 error = 0;
67719 if (end == start)
67720 goto out;
67721 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67722 index 06d3479..0778eef 100644
67723 --- a/mm/memory-failure.c
67724 +++ b/mm/memory-failure.c
67725 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67726
67727 int sysctl_memory_failure_recovery __read_mostly = 1;
67728
67729 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67730 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67731
67732 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67733
67734 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67735 si.si_signo = SIGBUS;
67736 si.si_errno = 0;
67737 si.si_code = BUS_MCEERR_AO;
67738 - si.si_addr = (void *)addr;
67739 + si.si_addr = (void __user *)addr;
67740 #ifdef __ARCH_SI_TRAPNO
67741 si.si_trapno = trapno;
67742 #endif
67743 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67744 }
67745
67746 nr_pages = 1 << compound_trans_order(hpage);
67747 - atomic_long_add(nr_pages, &mce_bad_pages);
67748 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67749
67750 /*
67751 * We need/can do nothing about count=0 pages.
67752 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67753 if (!PageHWPoison(hpage)
67754 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67755 || (p != hpage && TestSetPageHWPoison(hpage))) {
67756 - atomic_long_sub(nr_pages, &mce_bad_pages);
67757 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67758 return 0;
67759 }
67760 set_page_hwpoison_huge_page(hpage);
67761 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67762 }
67763 if (hwpoison_filter(p)) {
67764 if (TestClearPageHWPoison(p))
67765 - atomic_long_sub(nr_pages, &mce_bad_pages);
67766 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67767 unlock_page(hpage);
67768 put_page(hpage);
67769 return 0;
67770 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67771 return 0;
67772 }
67773 if (TestClearPageHWPoison(p))
67774 - atomic_long_sub(nr_pages, &mce_bad_pages);
67775 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67776 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67777 return 0;
67778 }
67779 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67780 */
67781 if (TestClearPageHWPoison(page)) {
67782 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67783 - atomic_long_sub(nr_pages, &mce_bad_pages);
67784 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67785 freeit = 1;
67786 if (PageHuge(page))
67787 clear_page_hwpoison_huge_page(page);
67788 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67789 }
67790 done:
67791 if (!PageHWPoison(hpage))
67792 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67793 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67794 set_page_hwpoison_huge_page(hpage);
67795 dequeue_hwpoisoned_huge_page(hpage);
67796 /* keep elevated page count for bad page */
67797 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67798 return ret;
67799
67800 done:
67801 - atomic_long_add(1, &mce_bad_pages);
67802 + atomic_long_add_unchecked(1, &mce_bad_pages);
67803 SetPageHWPoison(page);
67804 /* keep elevated page count for bad page */
67805 return ret;
67806 diff --git a/mm/memory.c b/mm/memory.c
67807 index 829d437..3d3926a 100644
67808 --- a/mm/memory.c
67809 +++ b/mm/memory.c
67810 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67811 return;
67812
67813 pmd = pmd_offset(pud, start);
67814 +
67815 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67816 pud_clear(pud);
67817 pmd_free_tlb(tlb, pmd, start);
67818 +#endif
67819 +
67820 }
67821
67822 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67823 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67824 if (end - 1 > ceiling - 1)
67825 return;
67826
67827 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67828 pud = pud_offset(pgd, start);
67829 pgd_clear(pgd);
67830 pud_free_tlb(tlb, pud, start);
67831 +#endif
67832 +
67833 }
67834
67835 /*
67836 @@ -1566,12 +1573,6 @@ no_page_table:
67837 return page;
67838 }
67839
67840 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67841 -{
67842 - return stack_guard_page_start(vma, addr) ||
67843 - stack_guard_page_end(vma, addr+PAGE_SIZE);
67844 -}
67845 -
67846 /**
67847 * __get_user_pages() - pin user pages in memory
67848 * @tsk: task_struct of target task
67849 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67850 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67851 i = 0;
67852
67853 - do {
67854 + while (nr_pages) {
67855 struct vm_area_struct *vma;
67856
67857 - vma = find_extend_vma(mm, start);
67858 + vma = find_vma(mm, start);
67859 if (!vma && in_gate_area(mm, start)) {
67860 unsigned long pg = start & PAGE_MASK;
67861 pgd_t *pgd;
67862 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67863 goto next_page;
67864 }
67865
67866 - if (!vma ||
67867 + if (!vma || start < vma->vm_start ||
67868 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67869 !(vm_flags & vma->vm_flags))
67870 return i ? : -EFAULT;
67871 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67872 int ret;
67873 unsigned int fault_flags = 0;
67874
67875 - /* For mlock, just skip the stack guard page. */
67876 - if (foll_flags & FOLL_MLOCK) {
67877 - if (stack_guard_page(vma, start))
67878 - goto next_page;
67879 - }
67880 if (foll_flags & FOLL_WRITE)
67881 fault_flags |= FAULT_FLAG_WRITE;
67882 if (nonblocking)
67883 @@ -1800,7 +1796,7 @@ next_page:
67884 start += PAGE_SIZE;
67885 nr_pages--;
67886 } while (nr_pages && start < vma->vm_end);
67887 - } while (nr_pages);
67888 + }
67889 return i;
67890 }
67891 EXPORT_SYMBOL(__get_user_pages);
67892 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
67893 page_add_file_rmap(page);
67894 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67895
67896 +#ifdef CONFIG_PAX_SEGMEXEC
67897 + pax_mirror_file_pte(vma, addr, page, ptl);
67898 +#endif
67899 +
67900 retval = 0;
67901 pte_unmap_unlock(pte, ptl);
67902 return retval;
67903 @@ -2041,10 +2041,22 @@ out:
67904 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67905 struct page *page)
67906 {
67907 +
67908 +#ifdef CONFIG_PAX_SEGMEXEC
67909 + struct vm_area_struct *vma_m;
67910 +#endif
67911 +
67912 if (addr < vma->vm_start || addr >= vma->vm_end)
67913 return -EFAULT;
67914 if (!page_count(page))
67915 return -EINVAL;
67916 +
67917 +#ifdef CONFIG_PAX_SEGMEXEC
67918 + vma_m = pax_find_mirror_vma(vma);
67919 + if (vma_m)
67920 + vma_m->vm_flags |= VM_INSERTPAGE;
67921 +#endif
67922 +
67923 vma->vm_flags |= VM_INSERTPAGE;
67924 return insert_page(vma, addr, page, vma->vm_page_prot);
67925 }
67926 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
67927 unsigned long pfn)
67928 {
67929 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67930 + BUG_ON(vma->vm_mirror);
67931
67932 if (addr < vma->vm_start || addr >= vma->vm_end)
67933 return -EFAULT;
67934 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
67935 copy_user_highpage(dst, src, va, vma);
67936 }
67937
67938 +#ifdef CONFIG_PAX_SEGMEXEC
67939 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67940 +{
67941 + struct mm_struct *mm = vma->vm_mm;
67942 + spinlock_t *ptl;
67943 + pte_t *pte, entry;
67944 +
67945 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67946 + entry = *pte;
67947 + if (!pte_present(entry)) {
67948 + if (!pte_none(entry)) {
67949 + BUG_ON(pte_file(entry));
67950 + free_swap_and_cache(pte_to_swp_entry(entry));
67951 + pte_clear_not_present_full(mm, address, pte, 0);
67952 + }
67953 + } else {
67954 + struct page *page;
67955 +
67956 + flush_cache_page(vma, address, pte_pfn(entry));
67957 + entry = ptep_clear_flush(vma, address, pte);
67958 + BUG_ON(pte_dirty(entry));
67959 + page = vm_normal_page(vma, address, entry);
67960 + if (page) {
67961 + update_hiwater_rss(mm);
67962 + if (PageAnon(page))
67963 + dec_mm_counter_fast(mm, MM_ANONPAGES);
67964 + else
67965 + dec_mm_counter_fast(mm, MM_FILEPAGES);
67966 + page_remove_rmap(page);
67967 + page_cache_release(page);
67968 + }
67969 + }
67970 + pte_unmap_unlock(pte, ptl);
67971 +}
67972 +
67973 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67974 + *
67975 + * the ptl of the lower mapped page is held on entry and is not released on exit
67976 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67977 + */
67978 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67979 +{
67980 + struct mm_struct *mm = vma->vm_mm;
67981 + unsigned long address_m;
67982 + spinlock_t *ptl_m;
67983 + struct vm_area_struct *vma_m;
67984 + pmd_t *pmd_m;
67985 + pte_t *pte_m, entry_m;
67986 +
67987 + BUG_ON(!page_m || !PageAnon(page_m));
67988 +
67989 + vma_m = pax_find_mirror_vma(vma);
67990 + if (!vma_m)
67991 + return;
67992 +
67993 + BUG_ON(!PageLocked(page_m));
67994 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67995 + address_m = address + SEGMEXEC_TASK_SIZE;
67996 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67997 + pte_m = pte_offset_map(pmd_m, address_m);
67998 + ptl_m = pte_lockptr(mm, pmd_m);
67999 + if (ptl != ptl_m) {
68000 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68001 + if (!pte_none(*pte_m))
68002 + goto out;
68003 + }
68004 +
68005 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68006 + page_cache_get(page_m);
68007 + page_add_anon_rmap(page_m, vma_m, address_m);
68008 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68009 + set_pte_at(mm, address_m, pte_m, entry_m);
68010 + update_mmu_cache(vma_m, address_m, entry_m);
68011 +out:
68012 + if (ptl != ptl_m)
68013 + spin_unlock(ptl_m);
68014 + pte_unmap(pte_m);
68015 + unlock_page(page_m);
68016 +}
68017 +
68018 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68019 +{
68020 + struct mm_struct *mm = vma->vm_mm;
68021 + unsigned long address_m;
68022 + spinlock_t *ptl_m;
68023 + struct vm_area_struct *vma_m;
68024 + pmd_t *pmd_m;
68025 + pte_t *pte_m, entry_m;
68026 +
68027 + BUG_ON(!page_m || PageAnon(page_m));
68028 +
68029 + vma_m = pax_find_mirror_vma(vma);
68030 + if (!vma_m)
68031 + return;
68032 +
68033 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68034 + address_m = address + SEGMEXEC_TASK_SIZE;
68035 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68036 + pte_m = pte_offset_map(pmd_m, address_m);
68037 + ptl_m = pte_lockptr(mm, pmd_m);
68038 + if (ptl != ptl_m) {
68039 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68040 + if (!pte_none(*pte_m))
68041 + goto out;
68042 + }
68043 +
68044 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68045 + page_cache_get(page_m);
68046 + page_add_file_rmap(page_m);
68047 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68048 + set_pte_at(mm, address_m, pte_m, entry_m);
68049 + update_mmu_cache(vma_m, address_m, entry_m);
68050 +out:
68051 + if (ptl != ptl_m)
68052 + spin_unlock(ptl_m);
68053 + pte_unmap(pte_m);
68054 +}
68055 +
68056 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68057 +{
68058 + struct mm_struct *mm = vma->vm_mm;
68059 + unsigned long address_m;
68060 + spinlock_t *ptl_m;
68061 + struct vm_area_struct *vma_m;
68062 + pmd_t *pmd_m;
68063 + pte_t *pte_m, entry_m;
68064 +
68065 + vma_m = pax_find_mirror_vma(vma);
68066 + if (!vma_m)
68067 + return;
68068 +
68069 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68070 + address_m = address + SEGMEXEC_TASK_SIZE;
68071 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68072 + pte_m = pte_offset_map(pmd_m, address_m);
68073 + ptl_m = pte_lockptr(mm, pmd_m);
68074 + if (ptl != ptl_m) {
68075 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68076 + if (!pte_none(*pte_m))
68077 + goto out;
68078 + }
68079 +
68080 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68081 + set_pte_at(mm, address_m, pte_m, entry_m);
68082 +out:
68083 + if (ptl != ptl_m)
68084 + spin_unlock(ptl_m);
68085 + pte_unmap(pte_m);
68086 +}
68087 +
68088 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68089 +{
68090 + struct page *page_m;
68091 + pte_t entry;
68092 +
68093 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68094 + goto out;
68095 +
68096 + entry = *pte;
68097 + page_m = vm_normal_page(vma, address, entry);
68098 + if (!page_m)
68099 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68100 + else if (PageAnon(page_m)) {
68101 + if (pax_find_mirror_vma(vma)) {
68102 + pte_unmap_unlock(pte, ptl);
68103 + lock_page(page_m);
68104 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68105 + if (pte_same(entry, *pte))
68106 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68107 + else
68108 + unlock_page(page_m);
68109 + }
68110 + } else
68111 + pax_mirror_file_pte(vma, address, page_m, ptl);
68112 +
68113 +out:
68114 + pte_unmap_unlock(pte, ptl);
68115 +}
68116 +#endif
68117 +
68118 /*
68119 * This routine handles present pages, when users try to write
68120 * to a shared page. It is done by copying the page to a new address
68121 @@ -2656,6 +2849,12 @@ gotten:
68122 */
68123 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68124 if (likely(pte_same(*page_table, orig_pte))) {
68125 +
68126 +#ifdef CONFIG_PAX_SEGMEXEC
68127 + if (pax_find_mirror_vma(vma))
68128 + BUG_ON(!trylock_page(new_page));
68129 +#endif
68130 +
68131 if (old_page) {
68132 if (!PageAnon(old_page)) {
68133 dec_mm_counter_fast(mm, MM_FILEPAGES);
68134 @@ -2707,6 +2906,10 @@ gotten:
68135 page_remove_rmap(old_page);
68136 }
68137
68138 +#ifdef CONFIG_PAX_SEGMEXEC
68139 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68140 +#endif
68141 +
68142 /* Free the old page.. */
68143 new_page = old_page;
68144 ret |= VM_FAULT_WRITE;
68145 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68146 swap_free(entry);
68147 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68148 try_to_free_swap(page);
68149 +
68150 +#ifdef CONFIG_PAX_SEGMEXEC
68151 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68152 +#endif
68153 +
68154 unlock_page(page);
68155 if (swapcache) {
68156 /*
68157 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68158
68159 /* No need to invalidate - it was non-present before */
68160 update_mmu_cache(vma, address, page_table);
68161 +
68162 +#ifdef CONFIG_PAX_SEGMEXEC
68163 + pax_mirror_anon_pte(vma, address, page, ptl);
68164 +#endif
68165 +
68166 unlock:
68167 pte_unmap_unlock(page_table, ptl);
68168 out:
68169 @@ -3028,40 +3241,6 @@ out_release:
68170 }
68171
68172 /*
68173 - * This is like a special single-page "expand_{down|up}wards()",
68174 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68175 - * doesn't hit another vma.
68176 - */
68177 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68178 -{
68179 - address &= PAGE_MASK;
68180 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68181 - struct vm_area_struct *prev = vma->vm_prev;
68182 -
68183 - /*
68184 - * Is there a mapping abutting this one below?
68185 - *
68186 - * That's only ok if it's the same stack mapping
68187 - * that has gotten split..
68188 - */
68189 - if (prev && prev->vm_end == address)
68190 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68191 -
68192 - expand_downwards(vma, address - PAGE_SIZE);
68193 - }
68194 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68195 - struct vm_area_struct *next = vma->vm_next;
68196 -
68197 - /* As VM_GROWSDOWN but s/below/above/ */
68198 - if (next && next->vm_start == address + PAGE_SIZE)
68199 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68200 -
68201 - expand_upwards(vma, address + PAGE_SIZE);
68202 - }
68203 - return 0;
68204 -}
68205 -
68206 -/*
68207 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68208 * but allow concurrent faults), and pte mapped but not yet locked.
68209 * We return with mmap_sem still held, but pte unmapped and unlocked.
68210 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68211 unsigned long address, pte_t *page_table, pmd_t *pmd,
68212 unsigned int flags)
68213 {
68214 - struct page *page;
68215 + struct page *page = NULL;
68216 spinlock_t *ptl;
68217 pte_t entry;
68218
68219 - pte_unmap(page_table);
68220 -
68221 - /* Check if we need to add a guard page to the stack */
68222 - if (check_stack_guard_page(vma, address) < 0)
68223 - return VM_FAULT_SIGBUS;
68224 -
68225 - /* Use the zero-page for reads */
68226 if (!(flags & FAULT_FLAG_WRITE)) {
68227 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68228 vma->vm_page_prot));
68229 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68230 + ptl = pte_lockptr(mm, pmd);
68231 + spin_lock(ptl);
68232 if (!pte_none(*page_table))
68233 goto unlock;
68234 goto setpte;
68235 }
68236
68237 /* Allocate our own private page. */
68238 + pte_unmap(page_table);
68239 +
68240 if (unlikely(anon_vma_prepare(vma)))
68241 goto oom;
68242 page = alloc_zeroed_user_highpage_movable(vma, address);
68243 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68244 if (!pte_none(*page_table))
68245 goto release;
68246
68247 +#ifdef CONFIG_PAX_SEGMEXEC
68248 + if (pax_find_mirror_vma(vma))
68249 + BUG_ON(!trylock_page(page));
68250 +#endif
68251 +
68252 inc_mm_counter_fast(mm, MM_ANONPAGES);
68253 page_add_new_anon_rmap(page, vma, address);
68254 setpte:
68255 @@ -3116,6 +3296,12 @@ setpte:
68256
68257 /* No need to invalidate - it was non-present before */
68258 update_mmu_cache(vma, address, page_table);
68259 +
68260 +#ifdef CONFIG_PAX_SEGMEXEC
68261 + if (page)
68262 + pax_mirror_anon_pte(vma, address, page, ptl);
68263 +#endif
68264 +
68265 unlock:
68266 pte_unmap_unlock(page_table, ptl);
68267 return 0;
68268 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68269 */
68270 /* Only go through if we didn't race with anybody else... */
68271 if (likely(pte_same(*page_table, orig_pte))) {
68272 +
68273 +#ifdef CONFIG_PAX_SEGMEXEC
68274 + if (anon && pax_find_mirror_vma(vma))
68275 + BUG_ON(!trylock_page(page));
68276 +#endif
68277 +
68278 flush_icache_page(vma, page);
68279 entry = mk_pte(page, vma->vm_page_prot);
68280 if (flags & FAULT_FLAG_WRITE)
68281 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68282
68283 /* no need to invalidate: a not-present page won't be cached */
68284 update_mmu_cache(vma, address, page_table);
68285 +
68286 +#ifdef CONFIG_PAX_SEGMEXEC
68287 + if (anon)
68288 + pax_mirror_anon_pte(vma, address, page, ptl);
68289 + else
68290 + pax_mirror_file_pte(vma, address, page, ptl);
68291 +#endif
68292 +
68293 } else {
68294 if (cow_page)
68295 mem_cgroup_uncharge_page(cow_page);
68296 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68297 if (flags & FAULT_FLAG_WRITE)
68298 flush_tlb_fix_spurious_fault(vma, address);
68299 }
68300 +
68301 +#ifdef CONFIG_PAX_SEGMEXEC
68302 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68303 + return 0;
68304 +#endif
68305 +
68306 unlock:
68307 pte_unmap_unlock(pte, ptl);
68308 return 0;
68309 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68310 pmd_t *pmd;
68311 pte_t *pte;
68312
68313 +#ifdef CONFIG_PAX_SEGMEXEC
68314 + struct vm_area_struct *vma_m;
68315 +#endif
68316 +
68317 __set_current_state(TASK_RUNNING);
68318
68319 count_vm_event(PGFAULT);
68320 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68321 if (unlikely(is_vm_hugetlb_page(vma)))
68322 return hugetlb_fault(mm, vma, address, flags);
68323
68324 +#ifdef CONFIG_PAX_SEGMEXEC
68325 + vma_m = pax_find_mirror_vma(vma);
68326 + if (vma_m) {
68327 + unsigned long address_m;
68328 + pgd_t *pgd_m;
68329 + pud_t *pud_m;
68330 + pmd_t *pmd_m;
68331 +
68332 + if (vma->vm_start > vma_m->vm_start) {
68333 + address_m = address;
68334 + address -= SEGMEXEC_TASK_SIZE;
68335 + vma = vma_m;
68336 + } else
68337 + address_m = address + SEGMEXEC_TASK_SIZE;
68338 +
68339 + pgd_m = pgd_offset(mm, address_m);
68340 + pud_m = pud_alloc(mm, pgd_m, address_m);
68341 + if (!pud_m)
68342 + return VM_FAULT_OOM;
68343 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68344 + if (!pmd_m)
68345 + return VM_FAULT_OOM;
68346 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68347 + return VM_FAULT_OOM;
68348 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68349 + }
68350 +#endif
68351 +
68352 pgd = pgd_offset(mm, address);
68353 pud = pud_alloc(mm, pgd, address);
68354 if (!pud)
68355 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68356 * run pte_offset_map on the pmd, if an huge pmd could
68357 * materialize from under us from a different thread.
68358 */
68359 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68360 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68361 return VM_FAULT_OOM;
68362 /* if an huge pmd materialized from under us just retry later */
68363 if (unlikely(pmd_trans_huge(*pmd)))
68364 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68365 gate_vma.vm_start = FIXADDR_USER_START;
68366 gate_vma.vm_end = FIXADDR_USER_END;
68367 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68368 - gate_vma.vm_page_prot = __P101;
68369 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68370 /*
68371 * Make sure the vDSO gets into every core dump.
68372 * Dumping its contents makes post-mortem fully interpretable later
68373 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68374 index c3fdbcb..2e8ef90 100644
68375 --- a/mm/mempolicy.c
68376 +++ b/mm/mempolicy.c
68377 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68378 unsigned long vmstart;
68379 unsigned long vmend;
68380
68381 +#ifdef CONFIG_PAX_SEGMEXEC
68382 + struct vm_area_struct *vma_m;
68383 +#endif
68384 +
68385 vma = find_vma_prev(mm, start, &prev);
68386 if (!vma || vma->vm_start > start)
68387 return -EFAULT;
68388 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68389 err = policy_vma(vma, new_pol);
68390 if (err)
68391 goto out;
68392 +
68393 +#ifdef CONFIG_PAX_SEGMEXEC
68394 + vma_m = pax_find_mirror_vma(vma);
68395 + if (vma_m) {
68396 + err = policy_vma(vma_m, new_pol);
68397 + if (err)
68398 + goto out;
68399 + }
68400 +#endif
68401 +
68402 }
68403
68404 out:
68405 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68406
68407 if (end < start)
68408 return -EINVAL;
68409 +
68410 +#ifdef CONFIG_PAX_SEGMEXEC
68411 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68412 + if (end > SEGMEXEC_TASK_SIZE)
68413 + return -EINVAL;
68414 + } else
68415 +#endif
68416 +
68417 + if (end > TASK_SIZE)
68418 + return -EINVAL;
68419 +
68420 if (end == start)
68421 return 0;
68422
68423 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68424 if (!mm)
68425 goto out;
68426
68427 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68428 + if (mm != current->mm &&
68429 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68430 + err = -EPERM;
68431 + goto out;
68432 + }
68433 +#endif
68434 +
68435 /*
68436 * Check if this process has the right to modify the specified
68437 * process. The right exists if the process has administrative
68438 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68439 rcu_read_lock();
68440 tcred = __task_cred(task);
68441 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68442 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68443 - !capable(CAP_SYS_NICE)) {
68444 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68445 rcu_read_unlock();
68446 err = -EPERM;
68447 goto out;
68448 diff --git a/mm/migrate.c b/mm/migrate.c
68449 index 177aca4..ab3a744 100644
68450 --- a/mm/migrate.c
68451 +++ b/mm/migrate.c
68452 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68453 if (!mm)
68454 return -EINVAL;
68455
68456 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68457 + if (mm != current->mm &&
68458 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68459 + err = -EPERM;
68460 + goto out;
68461 + }
68462 +#endif
68463 +
68464 /*
68465 * Check if this process has the right to modify the specified
68466 * process. The right exists if the process has administrative
68467 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68468 rcu_read_lock();
68469 tcred = __task_cred(task);
68470 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68471 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68472 - !capable(CAP_SYS_NICE)) {
68473 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68474 rcu_read_unlock();
68475 err = -EPERM;
68476 goto out;
68477 diff --git a/mm/mlock.c b/mm/mlock.c
68478 index 4f4f53b..9511904 100644
68479 --- a/mm/mlock.c
68480 +++ b/mm/mlock.c
68481 @@ -13,6 +13,7 @@
68482 #include <linux/pagemap.h>
68483 #include <linux/mempolicy.h>
68484 #include <linux/syscalls.h>
68485 +#include <linux/security.h>
68486 #include <linux/sched.h>
68487 #include <linux/export.h>
68488 #include <linux/rmap.h>
68489 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68490 return -EINVAL;
68491 if (end == start)
68492 return 0;
68493 + if (end > TASK_SIZE)
68494 + return -EINVAL;
68495 +
68496 vma = find_vma_prev(current->mm, start, &prev);
68497 if (!vma || vma->vm_start > start)
68498 return -ENOMEM;
68499 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68500 for (nstart = start ; ; ) {
68501 vm_flags_t newflags;
68502
68503 +#ifdef CONFIG_PAX_SEGMEXEC
68504 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68505 + break;
68506 +#endif
68507 +
68508 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68509
68510 newflags = vma->vm_flags | VM_LOCKED;
68511 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68512 lock_limit >>= PAGE_SHIFT;
68513
68514 /* check against resource limits */
68515 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68516 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68517 error = do_mlock(start, len, 1);
68518 up_write(&current->mm->mmap_sem);
68519 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68520 static int do_mlockall(int flags)
68521 {
68522 struct vm_area_struct * vma, * prev = NULL;
68523 - unsigned int def_flags = 0;
68524
68525 if (flags & MCL_FUTURE)
68526 - def_flags = VM_LOCKED;
68527 - current->mm->def_flags = def_flags;
68528 + current->mm->def_flags |= VM_LOCKED;
68529 + else
68530 + current->mm->def_flags &= ~VM_LOCKED;
68531 if (flags == MCL_FUTURE)
68532 goto out;
68533
68534 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68535 vm_flags_t newflags;
68536
68537 +#ifdef CONFIG_PAX_SEGMEXEC
68538 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68539 + break;
68540 +#endif
68541 +
68542 + BUG_ON(vma->vm_end > TASK_SIZE);
68543 newflags = vma->vm_flags | VM_LOCKED;
68544 if (!(flags & MCL_CURRENT))
68545 newflags &= ~VM_LOCKED;
68546 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68547 lock_limit >>= PAGE_SHIFT;
68548
68549 ret = -ENOMEM;
68550 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68551 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68552 capable(CAP_IPC_LOCK))
68553 ret = do_mlockall(flags);
68554 diff --git a/mm/mmap.c b/mm/mmap.c
68555 index eae90af..51ca80b 100644
68556 --- a/mm/mmap.c
68557 +++ b/mm/mmap.c
68558 @@ -46,6 +46,16 @@
68559 #define arch_rebalance_pgtables(addr, len) (addr)
68560 #endif
68561
68562 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68563 +{
68564 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68565 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68566 + up_read(&mm->mmap_sem);
68567 + BUG();
68568 + }
68569 +#endif
68570 +}
68571 +
68572 static void unmap_region(struct mm_struct *mm,
68573 struct vm_area_struct *vma, struct vm_area_struct *prev,
68574 unsigned long start, unsigned long end);
68575 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68576 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68577 *
68578 */
68579 -pgprot_t protection_map[16] = {
68580 +pgprot_t protection_map[16] __read_only = {
68581 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68582 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68583 };
68584
68585 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68586 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68587 {
68588 - return __pgprot(pgprot_val(protection_map[vm_flags &
68589 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68590 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68591 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68592 +
68593 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68594 + if (!(__supported_pte_mask & _PAGE_NX) &&
68595 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68596 + (vm_flags & (VM_READ | VM_WRITE)))
68597 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68598 +#endif
68599 +
68600 + return prot;
68601 }
68602 EXPORT_SYMBOL(vm_get_page_prot);
68603
68604 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68605 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68606 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68607 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68608 /*
68609 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68610 * other variables. It can be updated by several CPUs frequently.
68611 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68612 struct vm_area_struct *next = vma->vm_next;
68613
68614 might_sleep();
68615 + BUG_ON(vma->vm_mirror);
68616 if (vma->vm_ops && vma->vm_ops->close)
68617 vma->vm_ops->close(vma);
68618 if (vma->vm_file) {
68619 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68620 * not page aligned -Ram Gupta
68621 */
68622 rlim = rlimit(RLIMIT_DATA);
68623 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68624 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68625 (mm->end_data - mm->start_data) > rlim)
68626 goto out;
68627 @@ -689,6 +711,12 @@ static int
68628 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68629 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68630 {
68631 +
68632 +#ifdef CONFIG_PAX_SEGMEXEC
68633 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68634 + return 0;
68635 +#endif
68636 +
68637 if (is_mergeable_vma(vma, file, vm_flags) &&
68638 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68639 if (vma->vm_pgoff == vm_pgoff)
68640 @@ -708,6 +736,12 @@ static int
68641 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68642 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68643 {
68644 +
68645 +#ifdef CONFIG_PAX_SEGMEXEC
68646 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68647 + return 0;
68648 +#endif
68649 +
68650 if (is_mergeable_vma(vma, file, vm_flags) &&
68651 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68652 pgoff_t vm_pglen;
68653 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68654 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68655 struct vm_area_struct *prev, unsigned long addr,
68656 unsigned long end, unsigned long vm_flags,
68657 - struct anon_vma *anon_vma, struct file *file,
68658 + struct anon_vma *anon_vma, struct file *file,
68659 pgoff_t pgoff, struct mempolicy *policy)
68660 {
68661 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68662 struct vm_area_struct *area, *next;
68663 int err;
68664
68665 +#ifdef CONFIG_PAX_SEGMEXEC
68666 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68667 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68668 +
68669 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68670 +#endif
68671 +
68672 /*
68673 * We later require that vma->vm_flags == vm_flags,
68674 * so this tests vma->vm_flags & VM_SPECIAL, too.
68675 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68676 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68677 next = next->vm_next;
68678
68679 +#ifdef CONFIG_PAX_SEGMEXEC
68680 + if (prev)
68681 + prev_m = pax_find_mirror_vma(prev);
68682 + if (area)
68683 + area_m = pax_find_mirror_vma(area);
68684 + if (next)
68685 + next_m = pax_find_mirror_vma(next);
68686 +#endif
68687 +
68688 /*
68689 * Can it merge with the predecessor?
68690 */
68691 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68692 /* cases 1, 6 */
68693 err = vma_adjust(prev, prev->vm_start,
68694 next->vm_end, prev->vm_pgoff, NULL);
68695 - } else /* cases 2, 5, 7 */
68696 +
68697 +#ifdef CONFIG_PAX_SEGMEXEC
68698 + if (!err && prev_m)
68699 + err = vma_adjust(prev_m, prev_m->vm_start,
68700 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68701 +#endif
68702 +
68703 + } else { /* cases 2, 5, 7 */
68704 err = vma_adjust(prev, prev->vm_start,
68705 end, prev->vm_pgoff, NULL);
68706 +
68707 +#ifdef CONFIG_PAX_SEGMEXEC
68708 + if (!err && prev_m)
68709 + err = vma_adjust(prev_m, prev_m->vm_start,
68710 + end_m, prev_m->vm_pgoff, NULL);
68711 +#endif
68712 +
68713 + }
68714 if (err)
68715 return NULL;
68716 khugepaged_enter_vma_merge(prev);
68717 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68718 mpol_equal(policy, vma_policy(next)) &&
68719 can_vma_merge_before(next, vm_flags,
68720 anon_vma, file, pgoff+pglen)) {
68721 - if (prev && addr < prev->vm_end) /* case 4 */
68722 + if (prev && addr < prev->vm_end) { /* case 4 */
68723 err = vma_adjust(prev, prev->vm_start,
68724 addr, prev->vm_pgoff, NULL);
68725 - else /* cases 3, 8 */
68726 +
68727 +#ifdef CONFIG_PAX_SEGMEXEC
68728 + if (!err && prev_m)
68729 + err = vma_adjust(prev_m, prev_m->vm_start,
68730 + addr_m, prev_m->vm_pgoff, NULL);
68731 +#endif
68732 +
68733 + } else { /* cases 3, 8 */
68734 err = vma_adjust(area, addr, next->vm_end,
68735 next->vm_pgoff - pglen, NULL);
68736 +
68737 +#ifdef CONFIG_PAX_SEGMEXEC
68738 + if (!err && area_m)
68739 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
68740 + next_m->vm_pgoff - pglen, NULL);
68741 +#endif
68742 +
68743 + }
68744 if (err)
68745 return NULL;
68746 khugepaged_enter_vma_merge(area);
68747 @@ -921,14 +1001,11 @@ none:
68748 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68749 struct file *file, long pages)
68750 {
68751 - const unsigned long stack_flags
68752 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68753 -
68754 if (file) {
68755 mm->shared_vm += pages;
68756 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68757 mm->exec_vm += pages;
68758 - } else if (flags & stack_flags)
68759 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68760 mm->stack_vm += pages;
68761 if (flags & (VM_RESERVED|VM_IO))
68762 mm->reserved_vm += pages;
68763 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68764 * (the exception is when the underlying filesystem is noexec
68765 * mounted, in which case we dont add PROT_EXEC.)
68766 */
68767 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68768 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68769 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68770 prot |= PROT_EXEC;
68771
68772 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68773 /* Obtain the address to map to. we verify (or select) it and ensure
68774 * that it represents a valid section of the address space.
68775 */
68776 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68777 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68778 if (addr & ~PAGE_MASK)
68779 return addr;
68780
68781 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68782 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68783 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68784
68785 +#ifdef CONFIG_PAX_MPROTECT
68786 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68787 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68788 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68789 + gr_log_rwxmmap(file);
68790 +
68791 +#ifdef CONFIG_PAX_EMUPLT
68792 + vm_flags &= ~VM_EXEC;
68793 +#else
68794 + return -EPERM;
68795 +#endif
68796 +
68797 + }
68798 +
68799 + if (!(vm_flags & VM_EXEC))
68800 + vm_flags &= ~VM_MAYEXEC;
68801 +#else
68802 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68803 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68804 +#endif
68805 + else
68806 + vm_flags &= ~VM_MAYWRITE;
68807 + }
68808 +#endif
68809 +
68810 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68811 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68812 + vm_flags &= ~VM_PAGEEXEC;
68813 +#endif
68814 +
68815 if (flags & MAP_LOCKED)
68816 if (!can_do_mlock())
68817 return -EPERM;
68818 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68819 locked += mm->locked_vm;
68820 lock_limit = rlimit(RLIMIT_MEMLOCK);
68821 lock_limit >>= PAGE_SHIFT;
68822 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68823 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68824 return -EAGAIN;
68825 }
68826 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68827 if (error)
68828 return error;
68829
68830 + if (!gr_acl_handle_mmap(file, prot))
68831 + return -EACCES;
68832 +
68833 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68834 }
68835 EXPORT_SYMBOL(do_mmap_pgoff);
68836 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68837 vm_flags_t vm_flags = vma->vm_flags;
68838
68839 /* If it was private or non-writable, the write bit is already clear */
68840 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68841 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68842 return 0;
68843
68844 /* The backer wishes to know when pages are first written to? */
68845 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68846 unsigned long charged = 0;
68847 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68848
68849 +#ifdef CONFIG_PAX_SEGMEXEC
68850 + struct vm_area_struct *vma_m = NULL;
68851 +#endif
68852 +
68853 + /*
68854 + * mm->mmap_sem is required to protect against another thread
68855 + * changing the mappings in case we sleep.
68856 + */
68857 + verify_mm_writelocked(mm);
68858 +
68859 /* Clear old maps */
68860 error = -ENOMEM;
68861 -munmap_back:
68862 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68863 if (vma && vma->vm_start < addr + len) {
68864 if (do_munmap(mm, addr, len))
68865 return -ENOMEM;
68866 - goto munmap_back;
68867 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68868 + BUG_ON(vma && vma->vm_start < addr + len);
68869 }
68870
68871 /* Check against address space limit. */
68872 @@ -1258,6 +1379,16 @@ munmap_back:
68873 goto unacct_error;
68874 }
68875
68876 +#ifdef CONFIG_PAX_SEGMEXEC
68877 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68878 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68879 + if (!vma_m) {
68880 + error = -ENOMEM;
68881 + goto free_vma;
68882 + }
68883 + }
68884 +#endif
68885 +
68886 vma->vm_mm = mm;
68887 vma->vm_start = addr;
68888 vma->vm_end = addr + len;
68889 @@ -1281,6 +1412,19 @@ munmap_back:
68890 error = file->f_op->mmap(file, vma);
68891 if (error)
68892 goto unmap_and_free_vma;
68893 +
68894 +#ifdef CONFIG_PAX_SEGMEXEC
68895 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68896 + added_exe_file_vma(mm);
68897 +#endif
68898 +
68899 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68900 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68901 + vma->vm_flags |= VM_PAGEEXEC;
68902 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68903 + }
68904 +#endif
68905 +
68906 if (vm_flags & VM_EXECUTABLE)
68907 added_exe_file_vma(mm);
68908
68909 @@ -1316,6 +1460,11 @@ munmap_back:
68910 vma_link(mm, vma, prev, rb_link, rb_parent);
68911 file = vma->vm_file;
68912
68913 +#ifdef CONFIG_PAX_SEGMEXEC
68914 + if (vma_m)
68915 + BUG_ON(pax_mirror_vma(vma_m, vma));
68916 +#endif
68917 +
68918 /* Once vma denies write, undo our temporary denial count */
68919 if (correct_wcount)
68920 atomic_inc(&inode->i_writecount);
68921 @@ -1324,6 +1473,7 @@ out:
68922
68923 mm->total_vm += len >> PAGE_SHIFT;
68924 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68925 + track_exec_limit(mm, addr, addr + len, vm_flags);
68926 if (vm_flags & VM_LOCKED) {
68927 if (!mlock_vma_pages_range(vma, addr, addr + len))
68928 mm->locked_vm += (len >> PAGE_SHIFT);
68929 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68930 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68931 charged = 0;
68932 free_vma:
68933 +
68934 +#ifdef CONFIG_PAX_SEGMEXEC
68935 + if (vma_m)
68936 + kmem_cache_free(vm_area_cachep, vma_m);
68937 +#endif
68938 +
68939 kmem_cache_free(vm_area_cachep, vma);
68940 unacct_error:
68941 if (charged)
68942 @@ -1348,6 +1504,44 @@ unacct_error:
68943 return error;
68944 }
68945
68946 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68947 +{
68948 + if (!vma) {
68949 +#ifdef CONFIG_STACK_GROWSUP
68950 + if (addr > sysctl_heap_stack_gap)
68951 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68952 + else
68953 + vma = find_vma(current->mm, 0);
68954 + if (vma && (vma->vm_flags & VM_GROWSUP))
68955 + return false;
68956 +#endif
68957 + return true;
68958 + }
68959 +
68960 + if (addr + len > vma->vm_start)
68961 + return false;
68962 +
68963 + if (vma->vm_flags & VM_GROWSDOWN)
68964 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68965 +#ifdef CONFIG_STACK_GROWSUP
68966 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68967 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68968 +#endif
68969 +
68970 + return true;
68971 +}
68972 +
68973 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68974 +{
68975 + if (vma->vm_start < len)
68976 + return -ENOMEM;
68977 + if (!(vma->vm_flags & VM_GROWSDOWN))
68978 + return vma->vm_start - len;
68979 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68980 + return vma->vm_start - len - sysctl_heap_stack_gap;
68981 + return -ENOMEM;
68982 +}
68983 +
68984 /* Get an address range which is currently unmapped.
68985 * For shmat() with addr=0.
68986 *
68987 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
68988 if (flags & MAP_FIXED)
68989 return addr;
68990
68991 +#ifdef CONFIG_PAX_RANDMMAP
68992 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68993 +#endif
68994 +
68995 if (addr) {
68996 addr = PAGE_ALIGN(addr);
68997 - vma = find_vma(mm, addr);
68998 - if (TASK_SIZE - len >= addr &&
68999 - (!vma || addr + len <= vma->vm_start))
69000 - return addr;
69001 + if (TASK_SIZE - len >= addr) {
69002 + vma = find_vma(mm, addr);
69003 + if (check_heap_stack_gap(vma, addr, len))
69004 + return addr;
69005 + }
69006 }
69007 if (len > mm->cached_hole_size) {
69008 - start_addr = addr = mm->free_area_cache;
69009 + start_addr = addr = mm->free_area_cache;
69010 } else {
69011 - start_addr = addr = TASK_UNMAPPED_BASE;
69012 - mm->cached_hole_size = 0;
69013 + start_addr = addr = mm->mmap_base;
69014 + mm->cached_hole_size = 0;
69015 }
69016
69017 full_search:
69018 @@ -1396,34 +1595,40 @@ full_search:
69019 * Start a new search - just in case we missed
69020 * some holes.
69021 */
69022 - if (start_addr != TASK_UNMAPPED_BASE) {
69023 - addr = TASK_UNMAPPED_BASE;
69024 - start_addr = addr;
69025 + if (start_addr != mm->mmap_base) {
69026 + start_addr = addr = mm->mmap_base;
69027 mm->cached_hole_size = 0;
69028 goto full_search;
69029 }
69030 return -ENOMEM;
69031 }
69032 - if (!vma || addr + len <= vma->vm_start) {
69033 - /*
69034 - * Remember the place where we stopped the search:
69035 - */
69036 - mm->free_area_cache = addr + len;
69037 - return addr;
69038 - }
69039 + if (check_heap_stack_gap(vma, addr, len))
69040 + break;
69041 if (addr + mm->cached_hole_size < vma->vm_start)
69042 mm->cached_hole_size = vma->vm_start - addr;
69043 addr = vma->vm_end;
69044 }
69045 +
69046 + /*
69047 + * Remember the place where we stopped the search:
69048 + */
69049 + mm->free_area_cache = addr + len;
69050 + return addr;
69051 }
69052 #endif
69053
69054 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69055 {
69056 +
69057 +#ifdef CONFIG_PAX_SEGMEXEC
69058 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69059 + return;
69060 +#endif
69061 +
69062 /*
69063 * Is this a new hole at the lowest possible address?
69064 */
69065 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69066 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69067 mm->free_area_cache = addr;
69068 mm->cached_hole_size = ~0UL;
69069 }
69070 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69071 {
69072 struct vm_area_struct *vma;
69073 struct mm_struct *mm = current->mm;
69074 - unsigned long addr = addr0;
69075 + unsigned long base = mm->mmap_base, addr = addr0;
69076
69077 /* requested length too big for entire address space */
69078 if (len > TASK_SIZE)
69079 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69080 if (flags & MAP_FIXED)
69081 return addr;
69082
69083 +#ifdef CONFIG_PAX_RANDMMAP
69084 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69085 +#endif
69086 +
69087 /* requesting a specific address */
69088 if (addr) {
69089 addr = PAGE_ALIGN(addr);
69090 - vma = find_vma(mm, addr);
69091 - if (TASK_SIZE - len >= addr &&
69092 - (!vma || addr + len <= vma->vm_start))
69093 - return addr;
69094 + if (TASK_SIZE - len >= addr) {
69095 + vma = find_vma(mm, addr);
69096 + if (check_heap_stack_gap(vma, addr, len))
69097 + return addr;
69098 + }
69099 }
69100
69101 /* check if free_area_cache is useful for us */
69102 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69103 /* make sure it can fit in the remaining address space */
69104 if (addr > len) {
69105 vma = find_vma(mm, addr-len);
69106 - if (!vma || addr <= vma->vm_start)
69107 + if (check_heap_stack_gap(vma, addr - len, len))
69108 /* remember the address as a hint for next time */
69109 return (mm->free_area_cache = addr-len);
69110 }
69111 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69112 * return with success:
69113 */
69114 vma = find_vma(mm, addr);
69115 - if (!vma || addr+len <= vma->vm_start)
69116 + if (check_heap_stack_gap(vma, addr, len))
69117 /* remember the address as a hint for next time */
69118 return (mm->free_area_cache = addr);
69119
69120 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69121 mm->cached_hole_size = vma->vm_start - addr;
69122
69123 /* try just below the current vma->vm_start */
69124 - addr = vma->vm_start-len;
69125 - } while (len < vma->vm_start);
69126 + addr = skip_heap_stack_gap(vma, len);
69127 + } while (!IS_ERR_VALUE(addr));
69128
69129 bottomup:
69130 /*
69131 @@ -1507,13 +1717,21 @@ bottomup:
69132 * can happen with large stack limits and large mmap()
69133 * allocations.
69134 */
69135 + mm->mmap_base = TASK_UNMAPPED_BASE;
69136 +
69137 +#ifdef CONFIG_PAX_RANDMMAP
69138 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69139 + mm->mmap_base += mm->delta_mmap;
69140 +#endif
69141 +
69142 + mm->free_area_cache = mm->mmap_base;
69143 mm->cached_hole_size = ~0UL;
69144 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69145 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69146 /*
69147 * Restore the topdown base:
69148 */
69149 - mm->free_area_cache = mm->mmap_base;
69150 + mm->mmap_base = base;
69151 + mm->free_area_cache = base;
69152 mm->cached_hole_size = ~0UL;
69153
69154 return addr;
69155 @@ -1522,6 +1740,12 @@ bottomup:
69156
69157 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69158 {
69159 +
69160 +#ifdef CONFIG_PAX_SEGMEXEC
69161 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69162 + return;
69163 +#endif
69164 +
69165 /*
69166 * Is this a new hole at the highest possible address?
69167 */
69168 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69169 mm->free_area_cache = addr;
69170
69171 /* dont allow allocations above current base */
69172 - if (mm->free_area_cache > mm->mmap_base)
69173 + if (mm->free_area_cache > mm->mmap_base) {
69174 mm->free_area_cache = mm->mmap_base;
69175 + mm->cached_hole_size = ~0UL;
69176 + }
69177 }
69178
69179 unsigned long
69180 @@ -1638,6 +1864,28 @@ out:
69181 return prev ? prev->vm_next : vma;
69182 }
69183
69184 +#ifdef CONFIG_PAX_SEGMEXEC
69185 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69186 +{
69187 + struct vm_area_struct *vma_m;
69188 +
69189 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69190 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69191 + BUG_ON(vma->vm_mirror);
69192 + return NULL;
69193 + }
69194 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69195 + vma_m = vma->vm_mirror;
69196 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69197 + BUG_ON(vma->vm_file != vma_m->vm_file);
69198 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69199 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69200 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69201 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69202 + return vma_m;
69203 +}
69204 +#endif
69205 +
69206 /*
69207 * Verify that the stack growth is acceptable and
69208 * update accounting. This is shared with both the
69209 @@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69210 return -ENOMEM;
69211
69212 /* Stack limit test */
69213 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69214 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69215 return -ENOMEM;
69216
69217 @@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69218 locked = mm->locked_vm + grow;
69219 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69220 limit >>= PAGE_SHIFT;
69221 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69222 if (locked > limit && !capable(CAP_IPC_LOCK))
69223 return -ENOMEM;
69224 }
69225 @@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69226 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69227 * vma is the last one with address > vma->vm_end. Have to extend vma.
69228 */
69229 +#ifndef CONFIG_IA64
69230 +static
69231 +#endif
69232 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69233 {
69234 int error;
69235 + bool locknext;
69236
69237 if (!(vma->vm_flags & VM_GROWSUP))
69238 return -EFAULT;
69239
69240 + /* Also guard against wrapping around to address 0. */
69241 + if (address < PAGE_ALIGN(address+1))
69242 + address = PAGE_ALIGN(address+1);
69243 + else
69244 + return -ENOMEM;
69245 +
69246 /*
69247 * We must make sure the anon_vma is allocated
69248 * so that the anon_vma locking is not a noop.
69249 */
69250 if (unlikely(anon_vma_prepare(vma)))
69251 return -ENOMEM;
69252 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69253 + if (locknext && anon_vma_prepare(vma->vm_next))
69254 + return -ENOMEM;
69255 vma_lock_anon_vma(vma);
69256 + if (locknext)
69257 + vma_lock_anon_vma(vma->vm_next);
69258
69259 /*
69260 * vma->vm_start/vm_end cannot change under us because the caller
69261 * is required to hold the mmap_sem in read mode. We need the
69262 - * anon_vma lock to serialize against concurrent expand_stacks.
69263 - * Also guard against wrapping around to address 0.
69264 + * anon_vma locks to serialize against concurrent expand_stacks
69265 + * and expand_upwards.
69266 */
69267 - if (address < PAGE_ALIGN(address+4))
69268 - address = PAGE_ALIGN(address+4);
69269 - else {
69270 - vma_unlock_anon_vma(vma);
69271 - return -ENOMEM;
69272 - }
69273 error = 0;
69274
69275 /* Somebody else might have raced and expanded it already */
69276 - if (address > vma->vm_end) {
69277 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69278 + error = -ENOMEM;
69279 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69280 unsigned long size, grow;
69281
69282 size = address - vma->vm_start;
69283 @@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69284 }
69285 }
69286 }
69287 + if (locknext)
69288 + vma_unlock_anon_vma(vma->vm_next);
69289 vma_unlock_anon_vma(vma);
69290 khugepaged_enter_vma_merge(vma);
69291 return error;
69292 @@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
69293 unsigned long address)
69294 {
69295 int error;
69296 + bool lockprev = false;
69297 + struct vm_area_struct *prev;
69298
69299 /*
69300 * We must make sure the anon_vma is allocated
69301 @@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
69302 if (error)
69303 return error;
69304
69305 + prev = vma->vm_prev;
69306 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69307 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69308 +#endif
69309 + if (lockprev && anon_vma_prepare(prev))
69310 + return -ENOMEM;
69311 + if (lockprev)
69312 + vma_lock_anon_vma(prev);
69313 +
69314 vma_lock_anon_vma(vma);
69315
69316 /*
69317 @@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
69318 */
69319
69320 /* Somebody else might have raced and expanded it already */
69321 - if (address < vma->vm_start) {
69322 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69323 + error = -ENOMEM;
69324 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69325 unsigned long size, grow;
69326
69327 +#ifdef CONFIG_PAX_SEGMEXEC
69328 + struct vm_area_struct *vma_m;
69329 +
69330 + vma_m = pax_find_mirror_vma(vma);
69331 +#endif
69332 +
69333 size = vma->vm_end - address;
69334 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69335
69336 @@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
69337 if (!error) {
69338 vma->vm_start = address;
69339 vma->vm_pgoff -= grow;
69340 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69341 +
69342 +#ifdef CONFIG_PAX_SEGMEXEC
69343 + if (vma_m) {
69344 + vma_m->vm_start -= grow << PAGE_SHIFT;
69345 + vma_m->vm_pgoff -= grow;
69346 + }
69347 +#endif
69348 +
69349 perf_event_mmap(vma);
69350 }
69351 }
69352 }
69353 vma_unlock_anon_vma(vma);
69354 + if (lockprev)
69355 + vma_unlock_anon_vma(prev);
69356 khugepaged_enter_vma_merge(vma);
69357 return error;
69358 }
69359 @@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69360 do {
69361 long nrpages = vma_pages(vma);
69362
69363 +#ifdef CONFIG_PAX_SEGMEXEC
69364 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69365 + vma = remove_vma(vma);
69366 + continue;
69367 + }
69368 +#endif
69369 +
69370 mm->total_vm -= nrpages;
69371 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69372 vma = remove_vma(vma);
69373 @@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69374 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69375 vma->vm_prev = NULL;
69376 do {
69377 +
69378 +#ifdef CONFIG_PAX_SEGMEXEC
69379 + if (vma->vm_mirror) {
69380 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69381 + vma->vm_mirror->vm_mirror = NULL;
69382 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69383 + vma->vm_mirror = NULL;
69384 + }
69385 +#endif
69386 +
69387 rb_erase(&vma->vm_rb, &mm->mm_rb);
69388 mm->map_count--;
69389 tail_vma = vma;
69390 @@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69391 struct vm_area_struct *new;
69392 int err = -ENOMEM;
69393
69394 +#ifdef CONFIG_PAX_SEGMEXEC
69395 + struct vm_area_struct *vma_m, *new_m = NULL;
69396 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69397 +#endif
69398 +
69399 if (is_vm_hugetlb_page(vma) && (addr &
69400 ~(huge_page_mask(hstate_vma(vma)))))
69401 return -EINVAL;
69402
69403 +#ifdef CONFIG_PAX_SEGMEXEC
69404 + vma_m = pax_find_mirror_vma(vma);
69405 +#endif
69406 +
69407 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69408 if (!new)
69409 goto out_err;
69410
69411 +#ifdef CONFIG_PAX_SEGMEXEC
69412 + if (vma_m) {
69413 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69414 + if (!new_m) {
69415 + kmem_cache_free(vm_area_cachep, new);
69416 + goto out_err;
69417 + }
69418 + }
69419 +#endif
69420 +
69421 /* most fields are the same, copy all, and then fixup */
69422 *new = *vma;
69423
69424 @@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69425 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69426 }
69427
69428 +#ifdef CONFIG_PAX_SEGMEXEC
69429 + if (vma_m) {
69430 + *new_m = *vma_m;
69431 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69432 + new_m->vm_mirror = new;
69433 + new->vm_mirror = new_m;
69434 +
69435 + if (new_below)
69436 + new_m->vm_end = addr_m;
69437 + else {
69438 + new_m->vm_start = addr_m;
69439 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69440 + }
69441 + }
69442 +#endif
69443 +
69444 pol = mpol_dup(vma_policy(vma));
69445 if (IS_ERR(pol)) {
69446 err = PTR_ERR(pol);
69447 @@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69448 else
69449 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69450
69451 +#ifdef CONFIG_PAX_SEGMEXEC
69452 + if (!err && vma_m) {
69453 + if (anon_vma_clone(new_m, vma_m))
69454 + goto out_free_mpol;
69455 +
69456 + mpol_get(pol);
69457 + vma_set_policy(new_m, pol);
69458 +
69459 + if (new_m->vm_file) {
69460 + get_file(new_m->vm_file);
69461 + if (vma_m->vm_flags & VM_EXECUTABLE)
69462 + added_exe_file_vma(mm);
69463 + }
69464 +
69465 + if (new_m->vm_ops && new_m->vm_ops->open)
69466 + new_m->vm_ops->open(new_m);
69467 +
69468 + if (new_below)
69469 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69470 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69471 + else
69472 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69473 +
69474 + if (err) {
69475 + if (new_m->vm_ops && new_m->vm_ops->close)
69476 + new_m->vm_ops->close(new_m);
69477 + if (new_m->vm_file) {
69478 + if (vma_m->vm_flags & VM_EXECUTABLE)
69479 + removed_exe_file_vma(mm);
69480 + fput(new_m->vm_file);
69481 + }
69482 + mpol_put(pol);
69483 + }
69484 + }
69485 +#endif
69486 +
69487 /* Success. */
69488 if (!err)
69489 return 0;
69490 @@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69491 removed_exe_file_vma(mm);
69492 fput(new->vm_file);
69493 }
69494 - unlink_anon_vmas(new);
69495 out_free_mpol:
69496 mpol_put(pol);
69497 out_free_vma:
69498 +
69499 +#ifdef CONFIG_PAX_SEGMEXEC
69500 + if (new_m) {
69501 + unlink_anon_vmas(new_m);
69502 + kmem_cache_free(vm_area_cachep, new_m);
69503 + }
69504 +#endif
69505 +
69506 + unlink_anon_vmas(new);
69507 kmem_cache_free(vm_area_cachep, new);
69508 out_err:
69509 return err;
69510 @@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69511 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69512 unsigned long addr, int new_below)
69513 {
69514 +
69515 +#ifdef CONFIG_PAX_SEGMEXEC
69516 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69517 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69518 + if (mm->map_count >= sysctl_max_map_count-1)
69519 + return -ENOMEM;
69520 + } else
69521 +#endif
69522 +
69523 if (mm->map_count >= sysctl_max_map_count)
69524 return -ENOMEM;
69525
69526 @@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69527 * work. This now handles partial unmappings.
69528 * Jeremy Fitzhardinge <jeremy@goop.org>
69529 */
69530 +#ifdef CONFIG_PAX_SEGMEXEC
69531 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69532 {
69533 + int ret = __do_munmap(mm, start, len);
69534 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69535 + return ret;
69536 +
69537 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69538 +}
69539 +
69540 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69541 +#else
69542 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69543 +#endif
69544 +{
69545 unsigned long end;
69546 struct vm_area_struct *vma, *prev, *last;
69547
69548 + /*
69549 + * mm->mmap_sem is required to protect against another thread
69550 + * changing the mappings in case we sleep.
69551 + */
69552 + verify_mm_writelocked(mm);
69553 +
69554 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69555 return -EINVAL;
69556
69557 @@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69558 /* Fix up all other VM information */
69559 remove_vma_list(mm, vma);
69560
69561 + track_exec_limit(mm, start, end, 0UL);
69562 +
69563 return 0;
69564 }
69565
69566 @@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69567
69568 profile_munmap(addr);
69569
69570 +#ifdef CONFIG_PAX_SEGMEXEC
69571 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69572 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69573 + return -EINVAL;
69574 +#endif
69575 +
69576 down_write(&mm->mmap_sem);
69577 ret = do_munmap(mm, addr, len);
69578 up_write(&mm->mmap_sem);
69579 return ret;
69580 }
69581
69582 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69583 -{
69584 -#ifdef CONFIG_DEBUG_VM
69585 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69586 - WARN_ON(1);
69587 - up_read(&mm->mmap_sem);
69588 - }
69589 -#endif
69590 -}
69591 -
69592 /*
69593 * this is really a simplified "do_mmap". it only handles
69594 * anonymous maps. eventually we may be able to do some
69595 @@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69596 struct rb_node ** rb_link, * rb_parent;
69597 pgoff_t pgoff = addr >> PAGE_SHIFT;
69598 int error;
69599 + unsigned long charged;
69600
69601 len = PAGE_ALIGN(len);
69602 if (!len)
69603 @@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69604
69605 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69606
69607 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69608 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69609 + flags &= ~VM_EXEC;
69610 +
69611 +#ifdef CONFIG_PAX_MPROTECT
69612 + if (mm->pax_flags & MF_PAX_MPROTECT)
69613 + flags &= ~VM_MAYEXEC;
69614 +#endif
69615 +
69616 + }
69617 +#endif
69618 +
69619 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69620 if (error & ~PAGE_MASK)
69621 return error;
69622
69623 + charged = len >> PAGE_SHIFT;
69624 +
69625 /*
69626 * mlock MCL_FUTURE?
69627 */
69628 if (mm->def_flags & VM_LOCKED) {
69629 unsigned long locked, lock_limit;
69630 - locked = len >> PAGE_SHIFT;
69631 + locked = charged;
69632 locked += mm->locked_vm;
69633 lock_limit = rlimit(RLIMIT_MEMLOCK);
69634 lock_limit >>= PAGE_SHIFT;
69635 @@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69636 /*
69637 * Clear old maps. this also does some error checking for us
69638 */
69639 - munmap_back:
69640 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69641 if (vma && vma->vm_start < addr + len) {
69642 if (do_munmap(mm, addr, len))
69643 return -ENOMEM;
69644 - goto munmap_back;
69645 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69646 + BUG_ON(vma && vma->vm_start < addr + len);
69647 }
69648
69649 /* Check against address space limits *after* clearing old maps... */
69650 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69651 + if (!may_expand_vm(mm, charged))
69652 return -ENOMEM;
69653
69654 if (mm->map_count > sysctl_max_map_count)
69655 return -ENOMEM;
69656
69657 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69658 + if (security_vm_enough_memory(charged))
69659 return -ENOMEM;
69660
69661 /* Can we just expand an old private anonymous mapping? */
69662 @@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69663 */
69664 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69665 if (!vma) {
69666 - vm_unacct_memory(len >> PAGE_SHIFT);
69667 + vm_unacct_memory(charged);
69668 return -ENOMEM;
69669 }
69670
69671 @@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69672 vma_link(mm, vma, prev, rb_link, rb_parent);
69673 out:
69674 perf_event_mmap(vma);
69675 - mm->total_vm += len >> PAGE_SHIFT;
69676 + mm->total_vm += charged;
69677 if (flags & VM_LOCKED) {
69678 if (!mlock_vma_pages_range(vma, addr, addr + len))
69679 - mm->locked_vm += (len >> PAGE_SHIFT);
69680 + mm->locked_vm += charged;
69681 }
69682 + track_exec_limit(mm, addr, addr + len, flags);
69683 return addr;
69684 }
69685
69686 @@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69687 * Walk the list again, actually closing and freeing it,
69688 * with preemption enabled, without holding any MM locks.
69689 */
69690 - while (vma)
69691 + while (vma) {
69692 + vma->vm_mirror = NULL;
69693 vma = remove_vma(vma);
69694 + }
69695
69696 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69697 }
69698 @@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69699 struct vm_area_struct * __vma, * prev;
69700 struct rb_node ** rb_link, * rb_parent;
69701
69702 +#ifdef CONFIG_PAX_SEGMEXEC
69703 + struct vm_area_struct *vma_m = NULL;
69704 +#endif
69705 +
69706 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69707 + return -EPERM;
69708 +
69709 /*
69710 * The vm_pgoff of a purely anonymous vma should be irrelevant
69711 * until its first write fault, when page's anon_vma and index
69712 @@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69713 if ((vma->vm_flags & VM_ACCOUNT) &&
69714 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69715 return -ENOMEM;
69716 +
69717 +#ifdef CONFIG_PAX_SEGMEXEC
69718 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69719 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69720 + if (!vma_m)
69721 + return -ENOMEM;
69722 + }
69723 +#endif
69724 +
69725 vma_link(mm, vma, prev, rb_link, rb_parent);
69726 +
69727 +#ifdef CONFIG_PAX_SEGMEXEC
69728 + if (vma_m)
69729 + BUG_ON(pax_mirror_vma(vma_m, vma));
69730 +#endif
69731 +
69732 return 0;
69733 }
69734
69735 @@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69736 struct rb_node **rb_link, *rb_parent;
69737 struct mempolicy *pol;
69738
69739 + BUG_ON(vma->vm_mirror);
69740 +
69741 /*
69742 * If anonymous vma has not yet been faulted, update new pgoff
69743 * to match new location, to increase its chance of merging.
69744 @@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69745 return NULL;
69746 }
69747
69748 +#ifdef CONFIG_PAX_SEGMEXEC
69749 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69750 +{
69751 + struct vm_area_struct *prev_m;
69752 + struct rb_node **rb_link_m, *rb_parent_m;
69753 + struct mempolicy *pol_m;
69754 +
69755 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69756 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69757 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69758 + *vma_m = *vma;
69759 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69760 + if (anon_vma_clone(vma_m, vma))
69761 + return -ENOMEM;
69762 + pol_m = vma_policy(vma_m);
69763 + mpol_get(pol_m);
69764 + vma_set_policy(vma_m, pol_m);
69765 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69766 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69767 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69768 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69769 + if (vma_m->vm_file)
69770 + get_file(vma_m->vm_file);
69771 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69772 + vma_m->vm_ops->open(vma_m);
69773 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69774 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69775 + vma_m->vm_mirror = vma;
69776 + vma->vm_mirror = vma_m;
69777 + return 0;
69778 +}
69779 +#endif
69780 +
69781 /*
69782 * Return true if the calling process may expand its vm space by the passed
69783 * number of pages
69784 @@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69785 unsigned long lim;
69786
69787 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69788 -
69789 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69790 if (cur + npages > lim)
69791 return 0;
69792 return 1;
69793 @@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
69794 vma->vm_start = addr;
69795 vma->vm_end = addr + len;
69796
69797 +#ifdef CONFIG_PAX_MPROTECT
69798 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69799 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69800 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69801 + return -EPERM;
69802 + if (!(vm_flags & VM_EXEC))
69803 + vm_flags &= ~VM_MAYEXEC;
69804 +#else
69805 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69806 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69807 +#endif
69808 + else
69809 + vm_flags &= ~VM_MAYWRITE;
69810 + }
69811 +#endif
69812 +
69813 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69814 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69815
69816 diff --git a/mm/mprotect.c b/mm/mprotect.c
69817 index 5a688a2..27e031c 100644
69818 --- a/mm/mprotect.c
69819 +++ b/mm/mprotect.c
69820 @@ -23,10 +23,16 @@
69821 #include <linux/mmu_notifier.h>
69822 #include <linux/migrate.h>
69823 #include <linux/perf_event.h>
69824 +
69825 +#ifdef CONFIG_PAX_MPROTECT
69826 +#include <linux/elf.h>
69827 +#endif
69828 +
69829 #include <asm/uaccess.h>
69830 #include <asm/pgtable.h>
69831 #include <asm/cacheflush.h>
69832 #include <asm/tlbflush.h>
69833 +#include <asm/mmu_context.h>
69834
69835 #ifndef pgprot_modify
69836 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69837 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
69838 flush_tlb_range(vma, start, end);
69839 }
69840
69841 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69842 +/* called while holding the mmap semaphor for writing except stack expansion */
69843 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69844 +{
69845 + unsigned long oldlimit, newlimit = 0UL;
69846 +
69847 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69848 + return;
69849 +
69850 + spin_lock(&mm->page_table_lock);
69851 + oldlimit = mm->context.user_cs_limit;
69852 + if ((prot & VM_EXEC) && oldlimit < end)
69853 + /* USER_CS limit moved up */
69854 + newlimit = end;
69855 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69856 + /* USER_CS limit moved down */
69857 + newlimit = start;
69858 +
69859 + if (newlimit) {
69860 + mm->context.user_cs_limit = newlimit;
69861 +
69862 +#ifdef CONFIG_SMP
69863 + wmb();
69864 + cpus_clear(mm->context.cpu_user_cs_mask);
69865 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69866 +#endif
69867 +
69868 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69869 + }
69870 + spin_unlock(&mm->page_table_lock);
69871 + if (newlimit == end) {
69872 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69873 +
69874 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69875 + if (is_vm_hugetlb_page(vma))
69876 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69877 + else
69878 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69879 + }
69880 +}
69881 +#endif
69882 +
69883 int
69884 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69885 unsigned long start, unsigned long end, unsigned long newflags)
69886 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69887 int error;
69888 int dirty_accountable = 0;
69889
69890 +#ifdef CONFIG_PAX_SEGMEXEC
69891 + struct vm_area_struct *vma_m = NULL;
69892 + unsigned long start_m, end_m;
69893 +
69894 + start_m = start + SEGMEXEC_TASK_SIZE;
69895 + end_m = end + SEGMEXEC_TASK_SIZE;
69896 +#endif
69897 +
69898 if (newflags == oldflags) {
69899 *pprev = vma;
69900 return 0;
69901 }
69902
69903 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69904 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69905 +
69906 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69907 + return -ENOMEM;
69908 +
69909 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69910 + return -ENOMEM;
69911 + }
69912 +
69913 /*
69914 * If we make a private mapping writable we increase our commit;
69915 * but (without finer accounting) cannot reduce our commit if we
69916 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69917 }
69918 }
69919
69920 +#ifdef CONFIG_PAX_SEGMEXEC
69921 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69922 + if (start != vma->vm_start) {
69923 + error = split_vma(mm, vma, start, 1);
69924 + if (error)
69925 + goto fail;
69926 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69927 + *pprev = (*pprev)->vm_next;
69928 + }
69929 +
69930 + if (end != vma->vm_end) {
69931 + error = split_vma(mm, vma, end, 0);
69932 + if (error)
69933 + goto fail;
69934 + }
69935 +
69936 + if (pax_find_mirror_vma(vma)) {
69937 + error = __do_munmap(mm, start_m, end_m - start_m);
69938 + if (error)
69939 + goto fail;
69940 + } else {
69941 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69942 + if (!vma_m) {
69943 + error = -ENOMEM;
69944 + goto fail;
69945 + }
69946 + vma->vm_flags = newflags;
69947 + error = pax_mirror_vma(vma_m, vma);
69948 + if (error) {
69949 + vma->vm_flags = oldflags;
69950 + goto fail;
69951 + }
69952 + }
69953 + }
69954 +#endif
69955 +
69956 /*
69957 * First try to merge with previous and/or next vma.
69958 */
69959 @@ -204,9 +306,21 @@ success:
69960 * vm_flags and vm_page_prot are protected by the mmap_sem
69961 * held in write mode.
69962 */
69963 +
69964 +#ifdef CONFIG_PAX_SEGMEXEC
69965 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69966 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69967 +#endif
69968 +
69969 vma->vm_flags = newflags;
69970 +
69971 +#ifdef CONFIG_PAX_MPROTECT
69972 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69973 + mm->binfmt->handle_mprotect(vma, newflags);
69974 +#endif
69975 +
69976 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69977 - vm_get_page_prot(newflags));
69978 + vm_get_page_prot(vma->vm_flags));
69979
69980 if (vma_wants_writenotify(vma)) {
69981 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69982 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
69983 end = start + len;
69984 if (end <= start)
69985 return -ENOMEM;
69986 +
69987 +#ifdef CONFIG_PAX_SEGMEXEC
69988 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69989 + if (end > SEGMEXEC_TASK_SIZE)
69990 + return -EINVAL;
69991 + } else
69992 +#endif
69993 +
69994 + if (end > TASK_SIZE)
69995 + return -EINVAL;
69996 +
69997 if (!arch_validate_prot(prot))
69998 return -EINVAL;
69999
70000 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70001 /*
70002 * Does the application expect PROT_READ to imply PROT_EXEC:
70003 */
70004 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70005 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70006 prot |= PROT_EXEC;
70007
70008 vm_flags = calc_vm_prot_bits(prot);
70009 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70010 if (start > vma->vm_start)
70011 prev = vma;
70012
70013 +#ifdef CONFIG_PAX_MPROTECT
70014 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70015 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70016 +#endif
70017 +
70018 for (nstart = start ; ; ) {
70019 unsigned long newflags;
70020
70021 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70022
70023 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70024 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70025 + if (prot & (PROT_WRITE | PROT_EXEC))
70026 + gr_log_rwxmprotect(vma->vm_file);
70027 +
70028 + error = -EACCES;
70029 + goto out;
70030 + }
70031 +
70032 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70033 error = -EACCES;
70034 goto out;
70035 }
70036 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70037 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70038 if (error)
70039 goto out;
70040 +
70041 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70042 +
70043 nstart = tmp;
70044
70045 if (nstart < prev->vm_end)
70046 diff --git a/mm/mremap.c b/mm/mremap.c
70047 index d6959cb..18a402a 100644
70048 --- a/mm/mremap.c
70049 +++ b/mm/mremap.c
70050 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70051 continue;
70052 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70053 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70054 +
70055 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70056 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70057 + pte = pte_exprotect(pte);
70058 +#endif
70059 +
70060 set_pte_at(mm, new_addr, new_pte, pte);
70061 }
70062
70063 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70064 if (is_vm_hugetlb_page(vma))
70065 goto Einval;
70066
70067 +#ifdef CONFIG_PAX_SEGMEXEC
70068 + if (pax_find_mirror_vma(vma))
70069 + goto Einval;
70070 +#endif
70071 +
70072 /* We can't remap across vm area boundaries */
70073 if (old_len > vma->vm_end - addr)
70074 goto Efault;
70075 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70076 unsigned long ret = -EINVAL;
70077 unsigned long charged = 0;
70078 unsigned long map_flags;
70079 + unsigned long pax_task_size = TASK_SIZE;
70080
70081 if (new_addr & ~PAGE_MASK)
70082 goto out;
70083
70084 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70085 +#ifdef CONFIG_PAX_SEGMEXEC
70086 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70087 + pax_task_size = SEGMEXEC_TASK_SIZE;
70088 +#endif
70089 +
70090 + pax_task_size -= PAGE_SIZE;
70091 +
70092 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70093 goto out;
70094
70095 /* Check if the location we're moving into overlaps the
70096 * old location at all, and fail if it does.
70097 */
70098 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70099 - goto out;
70100 -
70101 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70102 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70103 goto out;
70104
70105 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70106 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70107 struct vm_area_struct *vma;
70108 unsigned long ret = -EINVAL;
70109 unsigned long charged = 0;
70110 + unsigned long pax_task_size = TASK_SIZE;
70111
70112 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70113 goto out;
70114 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70115 if (!new_len)
70116 goto out;
70117
70118 +#ifdef CONFIG_PAX_SEGMEXEC
70119 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70120 + pax_task_size = SEGMEXEC_TASK_SIZE;
70121 +#endif
70122 +
70123 + pax_task_size -= PAGE_SIZE;
70124 +
70125 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70126 + old_len > pax_task_size || addr > pax_task_size-old_len)
70127 + goto out;
70128 +
70129 if (flags & MREMAP_FIXED) {
70130 if (flags & MREMAP_MAYMOVE)
70131 ret = mremap_to(addr, old_len, new_addr, new_len);
70132 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70133 addr + new_len);
70134 }
70135 ret = addr;
70136 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70137 goto out;
70138 }
70139 }
70140 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70141 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70142 if (ret)
70143 goto out;
70144 +
70145 + map_flags = vma->vm_flags;
70146 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70147 + if (!(ret & ~PAGE_MASK)) {
70148 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70149 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70150 + }
70151 }
70152 out:
70153 if (ret & ~PAGE_MASK)
70154 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70155 index 7fa41b4..6087460 100644
70156 --- a/mm/nobootmem.c
70157 +++ b/mm/nobootmem.c
70158 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70159 unsigned long __init free_all_memory_core_early(int nodeid)
70160 {
70161 int i;
70162 - u64 start, end;
70163 + u64 start, end, startrange, endrange;
70164 unsigned long count = 0;
70165 - struct range *range = NULL;
70166 + struct range *range = NULL, rangerange = { 0, 0 };
70167 int nr_range;
70168
70169 nr_range = get_free_all_memory_range(&range, nodeid);
70170 + startrange = __pa(range) >> PAGE_SHIFT;
70171 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70172
70173 for (i = 0; i < nr_range; i++) {
70174 start = range[i].start;
70175 end = range[i].end;
70176 + if (start <= endrange && startrange < end) {
70177 + BUG_ON(rangerange.start | rangerange.end);
70178 + rangerange = range[i];
70179 + continue;
70180 + }
70181 count += end - start;
70182 __free_pages_memory(start, end);
70183 }
70184 + start = rangerange.start;
70185 + end = rangerange.end;
70186 + count += end - start;
70187 + __free_pages_memory(start, end);
70188
70189 return count;
70190 }
70191 diff --git a/mm/nommu.c b/mm/nommu.c
70192 index b982290..7d73f53 100644
70193 --- a/mm/nommu.c
70194 +++ b/mm/nommu.c
70195 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70196 int sysctl_overcommit_ratio = 50; /* default is 50% */
70197 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70198 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70199 -int heap_stack_gap = 0;
70200
70201 atomic_long_t mmap_pages_allocated;
70202
70203 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70204 EXPORT_SYMBOL(find_vma);
70205
70206 /*
70207 - * find a VMA
70208 - * - we don't extend stack VMAs under NOMMU conditions
70209 - */
70210 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70211 -{
70212 - return find_vma(mm, addr);
70213 -}
70214 -
70215 -/*
70216 * expand a stack to a given address
70217 * - not supported under NOMMU conditions
70218 */
70219 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70220
70221 /* most fields are the same, copy all, and then fixup */
70222 *new = *vma;
70223 + INIT_LIST_HEAD(&new->anon_vma_chain);
70224 *region = *vma->vm_region;
70225 new->vm_region = region;
70226
70227 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70228 index 485be89..c059ad3 100644
70229 --- a/mm/page_alloc.c
70230 +++ b/mm/page_alloc.c
70231 @@ -341,7 +341,7 @@ out:
70232 * This usage means that zero-order pages may not be compound.
70233 */
70234
70235 -static void free_compound_page(struct page *page)
70236 +void free_compound_page(struct page *page)
70237 {
70238 __free_pages_ok(page, compound_order(page));
70239 }
70240 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70241 int i;
70242 int bad = 0;
70243
70244 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70245 + unsigned long index = 1UL << order;
70246 +#endif
70247 +
70248 trace_mm_page_free_direct(page, order);
70249 kmemcheck_free_shadow(page, order);
70250
70251 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70252 debug_check_no_obj_freed(page_address(page),
70253 PAGE_SIZE << order);
70254 }
70255 +
70256 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70257 + for (; index; --index)
70258 + sanitize_highpage(page + index - 1);
70259 +#endif
70260 +
70261 arch_free_page(page, order);
70262 kernel_map_pages(page, 1 << order, 0);
70263
70264 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70265 arch_alloc_page(page, order);
70266 kernel_map_pages(page, 1 << order, 1);
70267
70268 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70269 if (gfp_flags & __GFP_ZERO)
70270 prep_zero_page(page, order, gfp_flags);
70271 +#endif
70272
70273 if (order && (gfp_flags & __GFP_COMP))
70274 prep_compound_page(page, order);
70275 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70276 unsigned long pfn;
70277
70278 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70279 +#ifdef CONFIG_X86_32
70280 + /* boot failures in VMware 8 on 32bit vanilla since
70281 + this change */
70282 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70283 +#else
70284 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70285 +#endif
70286 return 1;
70287 }
70288 return 0;
70289 diff --git a/mm/percpu.c b/mm/percpu.c
70290 index 716eb4a..8d10419 100644
70291 --- a/mm/percpu.c
70292 +++ b/mm/percpu.c
70293 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70294 static unsigned int pcpu_high_unit_cpu __read_mostly;
70295
70296 /* the address of the first chunk which starts with the kernel static area */
70297 -void *pcpu_base_addr __read_mostly;
70298 +void *pcpu_base_addr __read_only;
70299 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70300
70301 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70302 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70303 index e920aa3..137702a 100644
70304 --- a/mm/process_vm_access.c
70305 +++ b/mm/process_vm_access.c
70306 @@ -13,6 +13,7 @@
70307 #include <linux/uio.h>
70308 #include <linux/sched.h>
70309 #include <linux/highmem.h>
70310 +#include <linux/security.h>
70311 #include <linux/ptrace.h>
70312 #include <linux/slab.h>
70313 #include <linux/syscalls.h>
70314 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70315 size_t iov_l_curr_offset = 0;
70316 ssize_t iov_len;
70317
70318 + return -ENOSYS; // PaX: until properly audited
70319 +
70320 /*
70321 * Work out how many pages of struct pages we're going to need
70322 * when eventually calling get_user_pages
70323 */
70324 for (i = 0; i < riovcnt; i++) {
70325 iov_len = rvec[i].iov_len;
70326 - if (iov_len > 0) {
70327 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70328 - + iov_len)
70329 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70330 - / PAGE_SIZE + 1;
70331 - nr_pages = max(nr_pages, nr_pages_iov);
70332 - }
70333 + if (iov_len <= 0)
70334 + continue;
70335 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70336 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70337 + nr_pages = max(nr_pages, nr_pages_iov);
70338 }
70339
70340 if (nr_pages == 0)
70341 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70342 goto free_proc_pages;
70343 }
70344
70345 - task_lock(task);
70346 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70347 - task_unlock(task);
70348 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70349 rc = -EPERM;
70350 goto put_task_struct;
70351 }
70352 - mm = task->mm;
70353
70354 - if (!mm || (task->flags & PF_KTHREAD)) {
70355 - task_unlock(task);
70356 - rc = -EINVAL;
70357 + mm = mm_access(task, PTRACE_MODE_ATTACH);
70358 + if (!mm || IS_ERR(mm)) {
70359 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70360 + /*
70361 + * Explicitly map EACCES to EPERM as EPERM is a more a
70362 + * appropriate error code for process_vw_readv/writev
70363 + */
70364 + if (rc == -EACCES)
70365 + rc = -EPERM;
70366 goto put_task_struct;
70367 }
70368
70369 - atomic_inc(&mm->mm_users);
70370 - task_unlock(task);
70371 -
70372 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70373 rc = process_vm_rw_single_vec(
70374 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70375 diff --git a/mm/rmap.c b/mm/rmap.c
70376 index a4fd368..e0ffec7 100644
70377 --- a/mm/rmap.c
70378 +++ b/mm/rmap.c
70379 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70380 struct anon_vma *anon_vma = vma->anon_vma;
70381 struct anon_vma_chain *avc;
70382
70383 +#ifdef CONFIG_PAX_SEGMEXEC
70384 + struct anon_vma_chain *avc_m = NULL;
70385 +#endif
70386 +
70387 might_sleep();
70388 if (unlikely(!anon_vma)) {
70389 struct mm_struct *mm = vma->vm_mm;
70390 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70391 if (!avc)
70392 goto out_enomem;
70393
70394 +#ifdef CONFIG_PAX_SEGMEXEC
70395 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70396 + if (!avc_m)
70397 + goto out_enomem_free_avc;
70398 +#endif
70399 +
70400 anon_vma = find_mergeable_anon_vma(vma);
70401 allocated = NULL;
70402 if (!anon_vma) {
70403 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70404 /* page_table_lock to protect against threads */
70405 spin_lock(&mm->page_table_lock);
70406 if (likely(!vma->anon_vma)) {
70407 +
70408 +#ifdef CONFIG_PAX_SEGMEXEC
70409 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70410 +
70411 + if (vma_m) {
70412 + BUG_ON(vma_m->anon_vma);
70413 + vma_m->anon_vma = anon_vma;
70414 + avc_m->anon_vma = anon_vma;
70415 + avc_m->vma = vma;
70416 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70417 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70418 + avc_m = NULL;
70419 + }
70420 +#endif
70421 +
70422 vma->anon_vma = anon_vma;
70423 avc->anon_vma = anon_vma;
70424 avc->vma = vma;
70425 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70426
70427 if (unlikely(allocated))
70428 put_anon_vma(allocated);
70429 +
70430 +#ifdef CONFIG_PAX_SEGMEXEC
70431 + if (unlikely(avc_m))
70432 + anon_vma_chain_free(avc_m);
70433 +#endif
70434 +
70435 if (unlikely(avc))
70436 anon_vma_chain_free(avc);
70437 }
70438 return 0;
70439
70440 out_enomem_free_avc:
70441 +
70442 +#ifdef CONFIG_PAX_SEGMEXEC
70443 + if (avc_m)
70444 + anon_vma_chain_free(avc_m);
70445 +#endif
70446 +
70447 anon_vma_chain_free(avc);
70448 out_enomem:
70449 return -ENOMEM;
70450 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70451 * Attach the anon_vmas from src to dst.
70452 * Returns 0 on success, -ENOMEM on failure.
70453 */
70454 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70455 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70456 {
70457 struct anon_vma_chain *avc, *pavc;
70458 struct anon_vma *root = NULL;
70459 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70460 * the corresponding VMA in the parent process is attached to.
70461 * Returns 0 on success, non-zero on failure.
70462 */
70463 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70464 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70465 {
70466 struct anon_vma_chain *avc;
70467 struct anon_vma *anon_vma;
70468 diff --git a/mm/shmem.c b/mm/shmem.c
70469 index 6c253f7..367e20a 100644
70470 --- a/mm/shmem.c
70471 +++ b/mm/shmem.c
70472 @@ -31,7 +31,7 @@
70473 #include <linux/export.h>
70474 #include <linux/swap.h>
70475
70476 -static struct vfsmount *shm_mnt;
70477 +struct vfsmount *shm_mnt;
70478
70479 #ifdef CONFIG_SHMEM
70480 /*
70481 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70482 #define BOGO_DIRENT_SIZE 20
70483
70484 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70485 -#define SHORT_SYMLINK_LEN 128
70486 +#define SHORT_SYMLINK_LEN 64
70487
70488 struct shmem_xattr {
70489 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70490 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70491 int err = -ENOMEM;
70492
70493 /* Round up to L1_CACHE_BYTES to resist false sharing */
70494 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70495 - L1_CACHE_BYTES), GFP_KERNEL);
70496 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70497 if (!sbinfo)
70498 return -ENOMEM;
70499
70500 diff --git a/mm/slab.c b/mm/slab.c
70501 index 83311c9a..fcf8f86 100644
70502 --- a/mm/slab.c
70503 +++ b/mm/slab.c
70504 @@ -151,7 +151,7 @@
70505
70506 /* Legal flag mask for kmem_cache_create(). */
70507 #if DEBUG
70508 -# define CREATE_MASK (SLAB_RED_ZONE | \
70509 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70510 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70511 SLAB_CACHE_DMA | \
70512 SLAB_STORE_USER | \
70513 @@ -159,7 +159,7 @@
70514 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70515 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70516 #else
70517 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70518 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70519 SLAB_CACHE_DMA | \
70520 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70521 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70522 @@ -288,7 +288,7 @@ struct kmem_list3 {
70523 * Need this for bootstrapping a per node allocator.
70524 */
70525 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70526 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70527 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70528 #define CACHE_CACHE 0
70529 #define SIZE_AC MAX_NUMNODES
70530 #define SIZE_L3 (2 * MAX_NUMNODES)
70531 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70532 if ((x)->max_freeable < i) \
70533 (x)->max_freeable = i; \
70534 } while (0)
70535 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70536 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70537 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70538 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70539 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70540 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70541 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70542 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70543 #else
70544 #define STATS_INC_ACTIVE(x) do { } while (0)
70545 #define STATS_DEC_ACTIVE(x) do { } while (0)
70546 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70547 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70548 */
70549 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70550 - const struct slab *slab, void *obj)
70551 + const struct slab *slab, const void *obj)
70552 {
70553 u32 offset = (obj - slab->s_mem);
70554 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70555 @@ -564,7 +564,7 @@ struct cache_names {
70556 static struct cache_names __initdata cache_names[] = {
70557 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70558 #include <linux/kmalloc_sizes.h>
70559 - {NULL,}
70560 + {NULL}
70561 #undef CACHE
70562 };
70563
70564 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70565 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70566 sizes[INDEX_AC].cs_size,
70567 ARCH_KMALLOC_MINALIGN,
70568 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70569 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70570 NULL);
70571
70572 if (INDEX_AC != INDEX_L3) {
70573 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70574 kmem_cache_create(names[INDEX_L3].name,
70575 sizes[INDEX_L3].cs_size,
70576 ARCH_KMALLOC_MINALIGN,
70577 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70578 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70579 NULL);
70580 }
70581
70582 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70583 sizes->cs_cachep = kmem_cache_create(names->name,
70584 sizes->cs_size,
70585 ARCH_KMALLOC_MINALIGN,
70586 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70587 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70588 NULL);
70589 }
70590 #ifdef CONFIG_ZONE_DMA
70591 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70592 }
70593 /* cpu stats */
70594 {
70595 - unsigned long allochit = atomic_read(&cachep->allochit);
70596 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70597 - unsigned long freehit = atomic_read(&cachep->freehit);
70598 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70599 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70600 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70601 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70602 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70603
70604 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70605 allochit, allocmiss, freehit, freemiss);
70606 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70607 {
70608 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70609 #ifdef CONFIG_DEBUG_SLAB_LEAK
70610 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70611 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70612 #endif
70613 return 0;
70614 }
70615 module_init(slab_proc_init);
70616 #endif
70617
70618 +void check_object_size(const void *ptr, unsigned long n, bool to)
70619 +{
70620 +
70621 +#ifdef CONFIG_PAX_USERCOPY
70622 + struct page *page;
70623 + struct kmem_cache *cachep = NULL;
70624 + struct slab *slabp;
70625 + unsigned int objnr;
70626 + unsigned long offset;
70627 + const char *type;
70628 +
70629 + if (!n)
70630 + return;
70631 +
70632 + type = "<null>";
70633 + if (ZERO_OR_NULL_PTR(ptr))
70634 + goto report;
70635 +
70636 + if (!virt_addr_valid(ptr))
70637 + return;
70638 +
70639 + page = virt_to_head_page(ptr);
70640 +
70641 + type = "<process stack>";
70642 + if (!PageSlab(page)) {
70643 + if (object_is_on_stack(ptr, n) == -1)
70644 + goto report;
70645 + return;
70646 + }
70647 +
70648 + cachep = page_get_cache(page);
70649 + type = cachep->name;
70650 + if (!(cachep->flags & SLAB_USERCOPY))
70651 + goto report;
70652 +
70653 + slabp = page_get_slab(page);
70654 + objnr = obj_to_index(cachep, slabp, ptr);
70655 + BUG_ON(objnr >= cachep->num);
70656 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70657 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70658 + return;
70659 +
70660 +report:
70661 + pax_report_usercopy(ptr, n, to, type);
70662 +#endif
70663 +
70664 +}
70665 +EXPORT_SYMBOL(check_object_size);
70666 +
70667 /**
70668 * ksize - get the actual amount of memory allocated for a given object
70669 * @objp: Pointer to the object
70670 diff --git a/mm/slob.c b/mm/slob.c
70671 index 8105be4..579da9d 100644
70672 --- a/mm/slob.c
70673 +++ b/mm/slob.c
70674 @@ -29,7 +29,7 @@
70675 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70676 * alloc_pages() directly, allocating compound pages so the page order
70677 * does not have to be separately tracked, and also stores the exact
70678 - * allocation size in page->private so that it can be used to accurately
70679 + * allocation size in slob_page->size so that it can be used to accurately
70680 * provide ksize(). These objects are detected in kfree() because slob_page()
70681 * is false for them.
70682 *
70683 @@ -58,6 +58,7 @@
70684 */
70685
70686 #include <linux/kernel.h>
70687 +#include <linux/sched.h>
70688 #include <linux/slab.h>
70689 #include <linux/mm.h>
70690 #include <linux/swap.h> /* struct reclaim_state */
70691 @@ -102,7 +103,8 @@ struct slob_page {
70692 unsigned long flags; /* mandatory */
70693 atomic_t _count; /* mandatory */
70694 slobidx_t units; /* free units left in page */
70695 - unsigned long pad[2];
70696 + unsigned long pad[1];
70697 + unsigned long size; /* size when >=PAGE_SIZE */
70698 slob_t *free; /* first free slob_t in page */
70699 struct list_head list; /* linked list of free pages */
70700 };
70701 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70702 */
70703 static inline int is_slob_page(struct slob_page *sp)
70704 {
70705 - return PageSlab((struct page *)sp);
70706 + return PageSlab((struct page *)sp) && !sp->size;
70707 }
70708
70709 static inline void set_slob_page(struct slob_page *sp)
70710 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70711
70712 static inline struct slob_page *slob_page(const void *addr)
70713 {
70714 - return (struct slob_page *)virt_to_page(addr);
70715 + return (struct slob_page *)virt_to_head_page(addr);
70716 }
70717
70718 /*
70719 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70720 /*
70721 * Return the size of a slob block.
70722 */
70723 -static slobidx_t slob_units(slob_t *s)
70724 +static slobidx_t slob_units(const slob_t *s)
70725 {
70726 if (s->units > 0)
70727 return s->units;
70728 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70729 /*
70730 * Return the next free slob block pointer after this one.
70731 */
70732 -static slob_t *slob_next(slob_t *s)
70733 +static slob_t *slob_next(const slob_t *s)
70734 {
70735 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70736 slobidx_t next;
70737 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70738 /*
70739 * Returns true if s is the last free block in its page.
70740 */
70741 -static int slob_last(slob_t *s)
70742 +static int slob_last(const slob_t *s)
70743 {
70744 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70745 }
70746 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70747 if (!page)
70748 return NULL;
70749
70750 + set_slob_page(page);
70751 return page_address(page);
70752 }
70753
70754 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70755 if (!b)
70756 return NULL;
70757 sp = slob_page(b);
70758 - set_slob_page(sp);
70759
70760 spin_lock_irqsave(&slob_lock, flags);
70761 sp->units = SLOB_UNITS(PAGE_SIZE);
70762 sp->free = b;
70763 + sp->size = 0;
70764 INIT_LIST_HEAD(&sp->list);
70765 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70766 set_slob_page_free(sp, slob_list);
70767 @@ -476,10 +479,9 @@ out:
70768 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70769 */
70770
70771 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70772 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70773 {
70774 - unsigned int *m;
70775 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70776 + slob_t *m;
70777 void *ret;
70778
70779 gfp &= gfp_allowed_mask;
70780 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70781
70782 if (!m)
70783 return NULL;
70784 - *m = size;
70785 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70786 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70787 + m[0].units = size;
70788 + m[1].units = align;
70789 ret = (void *)m + align;
70790
70791 trace_kmalloc_node(_RET_IP_, ret,
70792 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70793 gfp |= __GFP_COMP;
70794 ret = slob_new_pages(gfp, order, node);
70795 if (ret) {
70796 - struct page *page;
70797 - page = virt_to_page(ret);
70798 - page->private = size;
70799 + struct slob_page *sp;
70800 + sp = slob_page(ret);
70801 + sp->size = size;
70802 }
70803
70804 trace_kmalloc_node(_RET_IP_, ret,
70805 size, PAGE_SIZE << order, gfp, node);
70806 }
70807
70808 - kmemleak_alloc(ret, size, 1, gfp);
70809 + return ret;
70810 +}
70811 +
70812 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70813 +{
70814 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70815 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70816 +
70817 + if (!ZERO_OR_NULL_PTR(ret))
70818 + kmemleak_alloc(ret, size, 1, gfp);
70819 return ret;
70820 }
70821 EXPORT_SYMBOL(__kmalloc_node);
70822 @@ -533,13 +547,92 @@ void kfree(const void *block)
70823 sp = slob_page(block);
70824 if (is_slob_page(sp)) {
70825 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70826 - unsigned int *m = (unsigned int *)(block - align);
70827 - slob_free(m, *m + align);
70828 - } else
70829 + slob_t *m = (slob_t *)(block - align);
70830 + slob_free(m, m[0].units + align);
70831 + } else {
70832 + clear_slob_page(sp);
70833 + free_slob_page(sp);
70834 + sp->size = 0;
70835 put_page(&sp->page);
70836 + }
70837 }
70838 EXPORT_SYMBOL(kfree);
70839
70840 +void check_object_size(const void *ptr, unsigned long n, bool to)
70841 +{
70842 +
70843 +#ifdef CONFIG_PAX_USERCOPY
70844 + struct slob_page *sp;
70845 + const slob_t *free;
70846 + const void *base;
70847 + unsigned long flags;
70848 + const char *type;
70849 +
70850 + if (!n)
70851 + return;
70852 +
70853 + type = "<null>";
70854 + if (ZERO_OR_NULL_PTR(ptr))
70855 + goto report;
70856 +
70857 + if (!virt_addr_valid(ptr))
70858 + return;
70859 +
70860 + type = "<process stack>";
70861 + sp = slob_page(ptr);
70862 + if (!PageSlab((struct page*)sp)) {
70863 + if (object_is_on_stack(ptr, n) == -1)
70864 + goto report;
70865 + return;
70866 + }
70867 +
70868 + type = "<slob>";
70869 + if (sp->size) {
70870 + base = page_address(&sp->page);
70871 + if (base <= ptr && n <= sp->size - (ptr - base))
70872 + return;
70873 + goto report;
70874 + }
70875 +
70876 + /* some tricky double walking to find the chunk */
70877 + spin_lock_irqsave(&slob_lock, flags);
70878 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70879 + free = sp->free;
70880 +
70881 + while (!slob_last(free) && (void *)free <= ptr) {
70882 + base = free + slob_units(free);
70883 + free = slob_next(free);
70884 + }
70885 +
70886 + while (base < (void *)free) {
70887 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70888 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70889 + int offset;
70890 +
70891 + if (ptr < base + align)
70892 + break;
70893 +
70894 + offset = ptr - base - align;
70895 + if (offset >= m) {
70896 + base += size;
70897 + continue;
70898 + }
70899 +
70900 + if (n > m - offset)
70901 + break;
70902 +
70903 + spin_unlock_irqrestore(&slob_lock, flags);
70904 + return;
70905 + }
70906 +
70907 + spin_unlock_irqrestore(&slob_lock, flags);
70908 +report:
70909 + pax_report_usercopy(ptr, n, to, type);
70910 +#endif
70911 +
70912 +}
70913 +EXPORT_SYMBOL(check_object_size);
70914 +
70915 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70916 size_t ksize(const void *block)
70917 {
70918 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
70919 sp = slob_page(block);
70920 if (is_slob_page(sp)) {
70921 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70922 - unsigned int *m = (unsigned int *)(block - align);
70923 - return SLOB_UNITS(*m) * SLOB_UNIT;
70924 + slob_t *m = (slob_t *)(block - align);
70925 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70926 } else
70927 - return sp->page.private;
70928 + return sp->size;
70929 }
70930 EXPORT_SYMBOL(ksize);
70931
70932 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
70933 {
70934 struct kmem_cache *c;
70935
70936 +#ifdef CONFIG_PAX_USERCOPY
70937 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70938 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70939 +#else
70940 c = slob_alloc(sizeof(struct kmem_cache),
70941 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70942 +#endif
70943
70944 if (c) {
70945 c->name = name;
70946 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
70947
70948 lockdep_trace_alloc(flags);
70949
70950 +#ifdef CONFIG_PAX_USERCOPY
70951 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70952 +#else
70953 if (c->size < PAGE_SIZE) {
70954 b = slob_alloc(c->size, flags, c->align, node);
70955 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70956 SLOB_UNITS(c->size) * SLOB_UNIT,
70957 flags, node);
70958 } else {
70959 + struct slob_page *sp;
70960 +
70961 b = slob_new_pages(flags, get_order(c->size), node);
70962 + sp = slob_page(b);
70963 + sp->size = c->size;
70964 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70965 PAGE_SIZE << get_order(c->size),
70966 flags, node);
70967 }
70968 +#endif
70969
70970 if (c->ctor)
70971 c->ctor(b);
70972 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70973
70974 static void __kmem_cache_free(void *b, int size)
70975 {
70976 - if (size < PAGE_SIZE)
70977 + struct slob_page *sp = slob_page(b);
70978 +
70979 + if (is_slob_page(sp))
70980 slob_free(b, size);
70981 - else
70982 + else {
70983 + clear_slob_page(sp);
70984 + free_slob_page(sp);
70985 + sp->size = 0;
70986 slob_free_pages(b, get_order(size));
70987 + }
70988 }
70989
70990 static void kmem_rcu_free(struct rcu_head *head)
70991 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
70992
70993 void kmem_cache_free(struct kmem_cache *c, void *b)
70994 {
70995 + int size = c->size;
70996 +
70997 +#ifdef CONFIG_PAX_USERCOPY
70998 + if (size + c->align < PAGE_SIZE) {
70999 + size += c->align;
71000 + b -= c->align;
71001 + }
71002 +#endif
71003 +
71004 kmemleak_free_recursive(b, c->flags);
71005 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71006 struct slob_rcu *slob_rcu;
71007 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71008 - slob_rcu->size = c->size;
71009 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71010 + slob_rcu->size = size;
71011 call_rcu(&slob_rcu->head, kmem_rcu_free);
71012 } else {
71013 - __kmem_cache_free(b, c->size);
71014 + __kmem_cache_free(b, size);
71015 }
71016
71017 +#ifdef CONFIG_PAX_USERCOPY
71018 + trace_kfree(_RET_IP_, b);
71019 +#else
71020 trace_kmem_cache_free(_RET_IP_, b);
71021 +#endif
71022 +
71023 }
71024 EXPORT_SYMBOL(kmem_cache_free);
71025
71026 diff --git a/mm/slub.c b/mm/slub.c
71027 index 1a919f0..1739c9b 100644
71028 --- a/mm/slub.c
71029 +++ b/mm/slub.c
71030 @@ -208,7 +208,7 @@ struct track {
71031
71032 enum track_item { TRACK_ALLOC, TRACK_FREE };
71033
71034 -#ifdef CONFIG_SYSFS
71035 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71036 static int sysfs_slab_add(struct kmem_cache *);
71037 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71038 static void sysfs_slab_remove(struct kmem_cache *);
71039 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71040 if (!t->addr)
71041 return;
71042
71043 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71044 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71045 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71046 #ifdef CONFIG_STACKTRACE
71047 {
71048 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71049
71050 page = virt_to_head_page(x);
71051
71052 + BUG_ON(!PageSlab(page));
71053 +
71054 slab_free(s, page, x, _RET_IP_);
71055
71056 trace_kmem_cache_free(_RET_IP_, x);
71057 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71058 * Merge control. If this is set then no merging of slab caches will occur.
71059 * (Could be removed. This was introduced to pacify the merge skeptics.)
71060 */
71061 -static int slub_nomerge;
71062 +static int slub_nomerge = 1;
71063
71064 /*
71065 * Calculate the order of allocation given an slab object size.
71066 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71067 else
71068 s->cpu_partial = 30;
71069
71070 - s->refcount = 1;
71071 + atomic_set(&s->refcount, 1);
71072 #ifdef CONFIG_NUMA
71073 s->remote_node_defrag_ratio = 1000;
71074 #endif
71075 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71076 void kmem_cache_destroy(struct kmem_cache *s)
71077 {
71078 down_write(&slub_lock);
71079 - s->refcount--;
71080 - if (!s->refcount) {
71081 + if (atomic_dec_and_test(&s->refcount)) {
71082 list_del(&s->list);
71083 up_write(&slub_lock);
71084 if (kmem_cache_close(s)) {
71085 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71086 EXPORT_SYMBOL(__kmalloc_node);
71087 #endif
71088
71089 +void check_object_size(const void *ptr, unsigned long n, bool to)
71090 +{
71091 +
71092 +#ifdef CONFIG_PAX_USERCOPY
71093 + struct page *page;
71094 + struct kmem_cache *s = NULL;
71095 + unsigned long offset;
71096 + const char *type;
71097 +
71098 + if (!n)
71099 + return;
71100 +
71101 + type = "<null>";
71102 + if (ZERO_OR_NULL_PTR(ptr))
71103 + goto report;
71104 +
71105 + if (!virt_addr_valid(ptr))
71106 + return;
71107 +
71108 + page = virt_to_head_page(ptr);
71109 +
71110 + type = "<process stack>";
71111 + if (!PageSlab(page)) {
71112 + if (object_is_on_stack(ptr, n) == -1)
71113 + goto report;
71114 + return;
71115 + }
71116 +
71117 + s = page->slab;
71118 + type = s->name;
71119 + if (!(s->flags & SLAB_USERCOPY))
71120 + goto report;
71121 +
71122 + offset = (ptr - page_address(page)) % s->size;
71123 + if (offset <= s->objsize && n <= s->objsize - offset)
71124 + return;
71125 +
71126 +report:
71127 + pax_report_usercopy(ptr, n, to, type);
71128 +#endif
71129 +
71130 +}
71131 +EXPORT_SYMBOL(check_object_size);
71132 +
71133 size_t ksize(const void *object)
71134 {
71135 struct page *page;
71136 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71137 int node;
71138
71139 list_add(&s->list, &slab_caches);
71140 - s->refcount = -1;
71141 + atomic_set(&s->refcount, -1);
71142
71143 for_each_node_state(node, N_NORMAL_MEMORY) {
71144 struct kmem_cache_node *n = get_node(s, node);
71145 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71146
71147 /* Caches that are not of the two-to-the-power-of size */
71148 if (KMALLOC_MIN_SIZE <= 32) {
71149 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71150 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71151 caches++;
71152 }
71153
71154 if (KMALLOC_MIN_SIZE <= 64) {
71155 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71156 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71157 caches++;
71158 }
71159
71160 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71161 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71162 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71163 caches++;
71164 }
71165
71166 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71167 /*
71168 * We may have set a slab to be unmergeable during bootstrap.
71169 */
71170 - if (s->refcount < 0)
71171 + if (atomic_read(&s->refcount) < 0)
71172 return 1;
71173
71174 return 0;
71175 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71176 down_write(&slub_lock);
71177 s = find_mergeable(size, align, flags, name, ctor);
71178 if (s) {
71179 - s->refcount++;
71180 + atomic_inc(&s->refcount);
71181 /*
71182 * Adjust the object sizes so that we clear
71183 * the complete object on kzalloc.
71184 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71185 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71186
71187 if (sysfs_slab_alias(s, name)) {
71188 - s->refcount--;
71189 + atomic_dec(&s->refcount);
71190 goto err;
71191 }
71192 up_write(&slub_lock);
71193 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71194 }
71195 #endif
71196
71197 -#ifdef CONFIG_SYSFS
71198 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71199 static int count_inuse(struct page *page)
71200 {
71201 return page->inuse;
71202 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71203 validate_slab_cache(kmalloc_caches[9]);
71204 }
71205 #else
71206 -#ifdef CONFIG_SYSFS
71207 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71208 static void resiliency_test(void) {};
71209 #endif
71210 #endif
71211
71212 -#ifdef CONFIG_SYSFS
71213 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71214 enum slab_stat_type {
71215 SL_ALL, /* All slabs */
71216 SL_PARTIAL, /* Only partially allocated slabs */
71217 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71218
71219 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71220 {
71221 - return sprintf(buf, "%d\n", s->refcount - 1);
71222 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71223 }
71224 SLAB_ATTR_RO(aliases);
71225
71226 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71227 return name;
71228 }
71229
71230 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71231 static int sysfs_slab_add(struct kmem_cache *s)
71232 {
71233 int err;
71234 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71235 kobject_del(&s->kobj);
71236 kobject_put(&s->kobj);
71237 }
71238 +#endif
71239
71240 /*
71241 * Need to buffer aliases during bootup until sysfs becomes
71242 @@ -5298,6 +5345,7 @@ struct saved_alias {
71243
71244 static struct saved_alias *alias_list;
71245
71246 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71247 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71248 {
71249 struct saved_alias *al;
71250 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71251 alias_list = al;
71252 return 0;
71253 }
71254 +#endif
71255
71256 static int __init slab_sysfs_init(void)
71257 {
71258 diff --git a/mm/swap.c b/mm/swap.c
71259 index a91caf7..b887e735 100644
71260 --- a/mm/swap.c
71261 +++ b/mm/swap.c
71262 @@ -31,6 +31,7 @@
71263 #include <linux/backing-dev.h>
71264 #include <linux/memcontrol.h>
71265 #include <linux/gfp.h>
71266 +#include <linux/hugetlb.h>
71267
71268 #include "internal.h"
71269
71270 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71271
71272 __page_cache_release(page);
71273 dtor = get_compound_page_dtor(page);
71274 + if (!PageHuge(page))
71275 + BUG_ON(dtor != free_compound_page);
71276 (*dtor)(page);
71277 }
71278
71279 diff --git a/mm/swapfile.c b/mm/swapfile.c
71280 index b1cd120..aaae885 100644
71281 --- a/mm/swapfile.c
71282 +++ b/mm/swapfile.c
71283 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71284
71285 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71286 /* Activity counter to indicate that a swapon or swapoff has occurred */
71287 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71288 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71289
71290 static inline unsigned char swap_count(unsigned char ent)
71291 {
71292 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71293 }
71294 filp_close(swap_file, NULL);
71295 err = 0;
71296 - atomic_inc(&proc_poll_event);
71297 + atomic_inc_unchecked(&proc_poll_event);
71298 wake_up_interruptible(&proc_poll_wait);
71299
71300 out_dput:
71301 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71302
71303 poll_wait(file, &proc_poll_wait, wait);
71304
71305 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71306 - seq->poll_event = atomic_read(&proc_poll_event);
71307 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71308 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71309 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71310 }
71311
71312 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71313 return ret;
71314
71315 seq = file->private_data;
71316 - seq->poll_event = atomic_read(&proc_poll_event);
71317 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71318 return 0;
71319 }
71320
71321 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71322 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71323
71324 mutex_unlock(&swapon_mutex);
71325 - atomic_inc(&proc_poll_event);
71326 + atomic_inc_unchecked(&proc_poll_event);
71327 wake_up_interruptible(&proc_poll_wait);
71328
71329 if (S_ISREG(inode->i_mode))
71330 diff --git a/mm/util.c b/mm/util.c
71331 index 136ac4f..5117eef 100644
71332 --- a/mm/util.c
71333 +++ b/mm/util.c
71334 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71335 * allocated buffer. Use this if you don't want to free the buffer immediately
71336 * like, for example, with RCU.
71337 */
71338 +#undef __krealloc
71339 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71340 {
71341 void *ret;
71342 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71343 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71344 * %NULL pointer, the object pointed to is freed.
71345 */
71346 +#undef krealloc
71347 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71348 {
71349 void *ret;
71350 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71351 void arch_pick_mmap_layout(struct mm_struct *mm)
71352 {
71353 mm->mmap_base = TASK_UNMAPPED_BASE;
71354 +
71355 +#ifdef CONFIG_PAX_RANDMMAP
71356 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71357 + mm->mmap_base += mm->delta_mmap;
71358 +#endif
71359 +
71360 mm->get_unmapped_area = arch_get_unmapped_area;
71361 mm->unmap_area = arch_unmap_area;
71362 }
71363 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71364 index 27be2f0..0aef2c2 100644
71365 --- a/mm/vmalloc.c
71366 +++ b/mm/vmalloc.c
71367 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71368
71369 pte = pte_offset_kernel(pmd, addr);
71370 do {
71371 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71372 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71373 +
71374 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71375 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71376 + BUG_ON(!pte_exec(*pte));
71377 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71378 + continue;
71379 + }
71380 +#endif
71381 +
71382 + {
71383 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71384 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71385 + }
71386 } while (pte++, addr += PAGE_SIZE, addr != end);
71387 }
71388
71389 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71390 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71391 {
71392 pte_t *pte;
71393 + int ret = -ENOMEM;
71394
71395 /*
71396 * nr is a running index into the array which helps higher level
71397 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71398 pte = pte_alloc_kernel(pmd, addr);
71399 if (!pte)
71400 return -ENOMEM;
71401 +
71402 + pax_open_kernel();
71403 do {
71404 struct page *page = pages[*nr];
71405
71406 - if (WARN_ON(!pte_none(*pte)))
71407 - return -EBUSY;
71408 - if (WARN_ON(!page))
71409 - return -ENOMEM;
71410 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71411 + if (pgprot_val(prot) & _PAGE_NX)
71412 +#endif
71413 +
71414 + if (WARN_ON(!pte_none(*pte))) {
71415 + ret = -EBUSY;
71416 + goto out;
71417 + }
71418 + if (WARN_ON(!page)) {
71419 + ret = -ENOMEM;
71420 + goto out;
71421 + }
71422 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71423 (*nr)++;
71424 } while (pte++, addr += PAGE_SIZE, addr != end);
71425 - return 0;
71426 + ret = 0;
71427 +out:
71428 + pax_close_kernel();
71429 + return ret;
71430 }
71431
71432 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71433 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71434 * and fall back on vmalloc() if that fails. Others
71435 * just put it in the vmalloc space.
71436 */
71437 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71438 +#ifdef CONFIG_MODULES
71439 +#ifdef MODULES_VADDR
71440 unsigned long addr = (unsigned long)x;
71441 if (addr >= MODULES_VADDR && addr < MODULES_END)
71442 return 1;
71443 #endif
71444 +
71445 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71446 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71447 + return 1;
71448 +#endif
71449 +
71450 +#endif
71451 +
71452 return is_vmalloc_addr(x);
71453 }
71454
71455 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71456
71457 if (!pgd_none(*pgd)) {
71458 pud_t *pud = pud_offset(pgd, addr);
71459 +#ifdef CONFIG_X86
71460 + if (!pud_large(*pud))
71461 +#endif
71462 if (!pud_none(*pud)) {
71463 pmd_t *pmd = pmd_offset(pud, addr);
71464 +#ifdef CONFIG_X86
71465 + if (!pmd_large(*pmd))
71466 +#endif
71467 if (!pmd_none(*pmd)) {
71468 pte_t *ptep, pte;
71469
71470 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71471 struct vm_struct *area;
71472
71473 BUG_ON(in_interrupt());
71474 +
71475 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71476 + if (flags & VM_KERNEXEC) {
71477 + if (start != VMALLOC_START || end != VMALLOC_END)
71478 + return NULL;
71479 + start = (unsigned long)MODULES_EXEC_VADDR;
71480 + end = (unsigned long)MODULES_EXEC_END;
71481 + }
71482 +#endif
71483 +
71484 if (flags & VM_IOREMAP) {
71485 int bit = fls(size);
71486
71487 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71488 if (count > totalram_pages)
71489 return NULL;
71490
71491 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71492 + if (!(pgprot_val(prot) & _PAGE_NX))
71493 + flags |= VM_KERNEXEC;
71494 +#endif
71495 +
71496 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71497 __builtin_return_address(0));
71498 if (!area)
71499 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71500 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71501 goto fail;
71502
71503 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71504 + if (!(pgprot_val(prot) & _PAGE_NX))
71505 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71506 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71507 + else
71508 +#endif
71509 +
71510 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71511 start, end, node, gfp_mask, caller);
71512 if (!area)
71513 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71514 gfp_mask, prot, node, caller);
71515 }
71516
71517 +#undef __vmalloc
71518 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71519 {
71520 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71521 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71522 * For tight control over page level allocator and protection flags
71523 * use __vmalloc() instead.
71524 */
71525 +#undef vmalloc
71526 void *vmalloc(unsigned long size)
71527 {
71528 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71529 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71530 * For tight control over page level allocator and protection flags
71531 * use __vmalloc() instead.
71532 */
71533 +#undef vzalloc
71534 void *vzalloc(unsigned long size)
71535 {
71536 return __vmalloc_node_flags(size, -1,
71537 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71538 * The resulting memory area is zeroed so it can be mapped to userspace
71539 * without leaking data.
71540 */
71541 +#undef vmalloc_user
71542 void *vmalloc_user(unsigned long size)
71543 {
71544 struct vm_struct *area;
71545 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71546 * For tight control over page level allocator and protection flags
71547 * use __vmalloc() instead.
71548 */
71549 +#undef vmalloc_node
71550 void *vmalloc_node(unsigned long size, int node)
71551 {
71552 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71553 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71554 * For tight control over page level allocator and protection flags
71555 * use __vmalloc_node() instead.
71556 */
71557 +#undef vzalloc_node
71558 void *vzalloc_node(unsigned long size, int node)
71559 {
71560 return __vmalloc_node_flags(size, node,
71561 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71562 * For tight control over page level allocator and protection flags
71563 * use __vmalloc() instead.
71564 */
71565 -
71566 +#undef vmalloc_exec
71567 void *vmalloc_exec(unsigned long size)
71568 {
71569 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71570 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71571 -1, __builtin_return_address(0));
71572 }
71573
71574 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71575 * Allocate enough 32bit PA addressable pages to cover @size from the
71576 * page level allocator and map them into contiguous kernel virtual space.
71577 */
71578 +#undef vmalloc_32
71579 void *vmalloc_32(unsigned long size)
71580 {
71581 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71582 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71583 * The resulting memory area is 32bit addressable and zeroed so it can be
71584 * mapped to userspace without leaking data.
71585 */
71586 +#undef vmalloc_32_user
71587 void *vmalloc_32_user(unsigned long size)
71588 {
71589 struct vm_struct *area;
71590 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71591 unsigned long uaddr = vma->vm_start;
71592 unsigned long usize = vma->vm_end - vma->vm_start;
71593
71594 + BUG_ON(vma->vm_mirror);
71595 +
71596 if ((PAGE_SIZE-1) & (unsigned long)addr)
71597 return -EINVAL;
71598
71599 diff --git a/mm/vmstat.c b/mm/vmstat.c
71600 index 8fd603b..cf0d930 100644
71601 --- a/mm/vmstat.c
71602 +++ b/mm/vmstat.c
71603 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71604 *
71605 * vm_stat contains the global counters
71606 */
71607 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71608 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71609 EXPORT_SYMBOL(vm_stat);
71610
71611 #ifdef CONFIG_SMP
71612 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71613 v = p->vm_stat_diff[i];
71614 p->vm_stat_diff[i] = 0;
71615 local_irq_restore(flags);
71616 - atomic_long_add(v, &zone->vm_stat[i]);
71617 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71618 global_diff[i] += v;
71619 #ifdef CONFIG_NUMA
71620 /* 3 seconds idle till flush */
71621 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71622
71623 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71624 if (global_diff[i])
71625 - atomic_long_add(global_diff[i], &vm_stat[i]);
71626 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71627 }
71628
71629 #endif
71630 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71631 start_cpu_timer(cpu);
71632 #endif
71633 #ifdef CONFIG_PROC_FS
71634 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71635 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71636 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71637 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71638 + {
71639 + mode_t gr_mode = S_IRUGO;
71640 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71641 + gr_mode = S_IRUSR;
71642 +#endif
71643 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71644 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71645 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71646 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71647 +#else
71648 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71649 +#endif
71650 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71651 + }
71652 #endif
71653 return 0;
71654 }
71655 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71656 index 5471628..cef8398 100644
71657 --- a/net/8021q/vlan.c
71658 +++ b/net/8021q/vlan.c
71659 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71660 err = -EPERM;
71661 if (!capable(CAP_NET_ADMIN))
71662 break;
71663 - if ((args.u.name_type >= 0) &&
71664 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71665 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71666 struct vlan_net *vn;
71667
71668 vn = net_generic(net, vlan_net_id);
71669 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71670 index fdfdb57..38d368c 100644
71671 --- a/net/9p/trans_fd.c
71672 +++ b/net/9p/trans_fd.c
71673 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71674 oldfs = get_fs();
71675 set_fs(get_ds());
71676 /* The cast to a user pointer is valid due to the set_fs() */
71677 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71678 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71679 set_fs(oldfs);
71680
71681 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71682 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71683 index f41f026..fe76ea8 100644
71684 --- a/net/atm/atm_misc.c
71685 +++ b/net/atm/atm_misc.c
71686 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71687 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71688 return 1;
71689 atm_return(vcc, truesize);
71690 - atomic_inc(&vcc->stats->rx_drop);
71691 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71692 return 0;
71693 }
71694 EXPORT_SYMBOL(atm_charge);
71695 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71696 }
71697 }
71698 atm_return(vcc, guess);
71699 - atomic_inc(&vcc->stats->rx_drop);
71700 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71701 return NULL;
71702 }
71703 EXPORT_SYMBOL(atm_alloc_charge);
71704 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71705
71706 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71707 {
71708 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71709 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71710 __SONET_ITEMS
71711 #undef __HANDLE_ITEM
71712 }
71713 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71714
71715 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71716 {
71717 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71718 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71719 __SONET_ITEMS
71720 #undef __HANDLE_ITEM
71721 }
71722 diff --git a/net/atm/lec.h b/net/atm/lec.h
71723 index dfc0719..47c5322 100644
71724 --- a/net/atm/lec.h
71725 +++ b/net/atm/lec.h
71726 @@ -48,7 +48,7 @@ struct lane2_ops {
71727 const u8 *tlvs, u32 sizeoftlvs);
71728 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71729 const u8 *tlvs, u32 sizeoftlvs);
71730 -};
71731 +} __no_const;
71732
71733 /*
71734 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71735 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71736 index 0919a88..a23d54e 100644
71737 --- a/net/atm/mpc.h
71738 +++ b/net/atm/mpc.h
71739 @@ -33,7 +33,7 @@ struct mpoa_client {
71740 struct mpc_parameters parameters; /* parameters for this client */
71741
71742 const struct net_device_ops *old_ops;
71743 - struct net_device_ops new_ops;
71744 + net_device_ops_no_const new_ops;
71745 };
71746
71747
71748 diff --git a/net/atm/proc.c b/net/atm/proc.c
71749 index 0d020de..011c7bb 100644
71750 --- a/net/atm/proc.c
71751 +++ b/net/atm/proc.c
71752 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71753 const struct k_atm_aal_stats *stats)
71754 {
71755 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71756 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71757 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71758 - atomic_read(&stats->rx_drop));
71759 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71760 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71761 + atomic_read_unchecked(&stats->rx_drop));
71762 }
71763
71764 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71765 diff --git a/net/atm/resources.c b/net/atm/resources.c
71766 index 23f45ce..c748f1a 100644
71767 --- a/net/atm/resources.c
71768 +++ b/net/atm/resources.c
71769 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71770 static void copy_aal_stats(struct k_atm_aal_stats *from,
71771 struct atm_aal_stats *to)
71772 {
71773 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71774 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71775 __AAL_STAT_ITEMS
71776 #undef __HANDLE_ITEM
71777 }
71778 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71779 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71780 struct atm_aal_stats *to)
71781 {
71782 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71783 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71784 __AAL_STAT_ITEMS
71785 #undef __HANDLE_ITEM
71786 }
71787 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71788 index 3512e25..2b33401 100644
71789 --- a/net/batman-adv/bat_iv_ogm.c
71790 +++ b/net/batman-adv/bat_iv_ogm.c
71791 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71792
71793 /* change sequence number to network order */
71794 batman_ogm_packet->seqno =
71795 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
71796 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71797
71798 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71799 batman_ogm_packet->tt_crc = htons((uint16_t)
71800 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71801 else
71802 batman_ogm_packet->gw_flags = NO_FLAGS;
71803
71804 - atomic_inc(&hard_iface->seqno);
71805 + atomic_inc_unchecked(&hard_iface->seqno);
71806
71807 slide_own_bcast_window(hard_iface);
71808 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71809 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71810 return;
71811
71812 /* could be changed by schedule_own_packet() */
71813 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
71814 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71815
71816 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71817
71818 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71819 index 7704df4..beb4e16 100644
71820 --- a/net/batman-adv/hard-interface.c
71821 +++ b/net/batman-adv/hard-interface.c
71822 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71823 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71824 dev_add_pack(&hard_iface->batman_adv_ptype);
71825
71826 - atomic_set(&hard_iface->seqno, 1);
71827 - atomic_set(&hard_iface->frag_seqno, 1);
71828 + atomic_set_unchecked(&hard_iface->seqno, 1);
71829 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71830 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71831 hard_iface->net_dev->name);
71832
71833 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
71834 index f9cc957..efd9dae 100644
71835 --- a/net/batman-adv/soft-interface.c
71836 +++ b/net/batman-adv/soft-interface.c
71837 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
71838
71839 /* set broadcast sequence number */
71840 bcast_packet->seqno =
71841 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71842 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71843
71844 add_bcast_packet_to_list(bat_priv, skb, 1);
71845
71846 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
71847 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71848
71849 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71850 - atomic_set(&bat_priv->bcast_seqno, 1);
71851 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71852 atomic_set(&bat_priv->ttvn, 0);
71853 atomic_set(&bat_priv->tt_local_changes, 0);
71854 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71855 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
71856 index ab8d0fe..ceba3fd 100644
71857 --- a/net/batman-adv/types.h
71858 +++ b/net/batman-adv/types.h
71859 @@ -38,8 +38,8 @@ struct hard_iface {
71860 int16_t if_num;
71861 char if_status;
71862 struct net_device *net_dev;
71863 - atomic_t seqno;
71864 - atomic_t frag_seqno;
71865 + atomic_unchecked_t seqno;
71866 + atomic_unchecked_t frag_seqno;
71867 unsigned char *packet_buff;
71868 int packet_len;
71869 struct kobject *hardif_obj;
71870 @@ -154,7 +154,7 @@ struct bat_priv {
71871 atomic_t orig_interval; /* uint */
71872 atomic_t hop_penalty; /* uint */
71873 atomic_t log_level; /* uint */
71874 - atomic_t bcast_seqno;
71875 + atomic_unchecked_t bcast_seqno;
71876 atomic_t bcast_queue_left;
71877 atomic_t batman_queue_left;
71878 atomic_t ttvn; /* translation table version number */
71879 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
71880 index 07d1c1d..7e9bea9 100644
71881 --- a/net/batman-adv/unicast.c
71882 +++ b/net/batman-adv/unicast.c
71883 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
71884 frag1->flags = UNI_FRAG_HEAD | large_tail;
71885 frag2->flags = large_tail;
71886
71887 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71888 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71889 frag1->seqno = htons(seqno - 1);
71890 frag2->seqno = htons(seqno);
71891
71892 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
71893 index c1c597e..05ebb40 100644
71894 --- a/net/bluetooth/hci_conn.c
71895 +++ b/net/bluetooth/hci_conn.c
71896 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
71897 memset(&cp, 0, sizeof(cp));
71898
71899 cp.handle = cpu_to_le16(conn->handle);
71900 - memcpy(cp.ltk, ltk, sizeof(ltk));
71901 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71902
71903 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71904 }
71905 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
71906 index 17b5b1c..826d872 100644
71907 --- a/net/bluetooth/l2cap_core.c
71908 +++ b/net/bluetooth/l2cap_core.c
71909 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
71910 break;
71911
71912 case L2CAP_CONF_RFC:
71913 - if (olen == sizeof(rfc))
71914 - memcpy(&rfc, (void *)val, olen);
71915 + if (olen != sizeof(rfc))
71916 + break;
71917 +
71918 + memcpy(&rfc, (void *)val, olen);
71919
71920 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
71921 rfc.mode != chan->mode)
71922 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
71923
71924 switch (type) {
71925 case L2CAP_CONF_RFC:
71926 - if (olen == sizeof(rfc))
71927 - memcpy(&rfc, (void *)val, olen);
71928 + if (olen != sizeof(rfc))
71929 + break;
71930 +
71931 + memcpy(&rfc, (void *)val, olen);
71932 goto done;
71933 }
71934 }
71935 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
71936 index a5f4e57..910ee6d 100644
71937 --- a/net/bridge/br_multicast.c
71938 +++ b/net/bridge/br_multicast.c
71939 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
71940 nexthdr = ip6h->nexthdr;
71941 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71942
71943 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71944 + if (nexthdr != IPPROTO_ICMPV6)
71945 return 0;
71946
71947 /* Okay, we found ICMPv6 header */
71948 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
71949 index 5864cc4..121f3a3 100644
71950 --- a/net/bridge/netfilter/ebtables.c
71951 +++ b/net/bridge/netfilter/ebtables.c
71952 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
71953 tmp.valid_hooks = t->table->valid_hooks;
71954 }
71955 mutex_unlock(&ebt_mutex);
71956 - if (copy_to_user(user, &tmp, *len) != 0){
71957 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71958 BUGPRINT("c2u Didn't work\n");
71959 ret = -EFAULT;
71960 break;
71961 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
71962 index a986280..13444a1 100644
71963 --- a/net/caif/caif_socket.c
71964 +++ b/net/caif/caif_socket.c
71965 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71966 #ifdef CONFIG_DEBUG_FS
71967 struct debug_fs_counter {
71968 atomic_t caif_nr_socks;
71969 - atomic_t caif_sock_create;
71970 - atomic_t num_connect_req;
71971 - atomic_t num_connect_resp;
71972 - atomic_t num_connect_fail_resp;
71973 - atomic_t num_disconnect;
71974 - atomic_t num_remote_shutdown_ind;
71975 - atomic_t num_tx_flow_off_ind;
71976 - atomic_t num_tx_flow_on_ind;
71977 - atomic_t num_rx_flow_off;
71978 - atomic_t num_rx_flow_on;
71979 + atomic_unchecked_t caif_sock_create;
71980 + atomic_unchecked_t num_connect_req;
71981 + atomic_unchecked_t num_connect_resp;
71982 + atomic_unchecked_t num_connect_fail_resp;
71983 + atomic_unchecked_t num_disconnect;
71984 + atomic_unchecked_t num_remote_shutdown_ind;
71985 + atomic_unchecked_t num_tx_flow_off_ind;
71986 + atomic_unchecked_t num_tx_flow_on_ind;
71987 + atomic_unchecked_t num_rx_flow_off;
71988 + atomic_unchecked_t num_rx_flow_on;
71989 };
71990 static struct debug_fs_counter cnt;
71991 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71992 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71993 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71994 #else
71995 #define dbfs_atomic_inc(v) 0
71996 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
71997 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71998 sk_rcvbuf_lowwater(cf_sk));
71999 set_rx_flow_off(cf_sk);
72000 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72001 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72002 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72003 }
72004
72005 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72006 set_rx_flow_off(cf_sk);
72007 if (net_ratelimit())
72008 pr_debug("sending flow OFF due to rmem_schedule\n");
72009 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72010 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72011 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72012 }
72013 skb->dev = NULL;
72014 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72015 switch (flow) {
72016 case CAIF_CTRLCMD_FLOW_ON_IND:
72017 /* OK from modem to start sending again */
72018 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72019 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72020 set_tx_flow_on(cf_sk);
72021 cf_sk->sk.sk_state_change(&cf_sk->sk);
72022 break;
72023
72024 case CAIF_CTRLCMD_FLOW_OFF_IND:
72025 /* Modem asks us to shut up */
72026 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72027 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72028 set_tx_flow_off(cf_sk);
72029 cf_sk->sk.sk_state_change(&cf_sk->sk);
72030 break;
72031 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72032 /* We're now connected */
72033 caif_client_register_refcnt(&cf_sk->layer,
72034 cfsk_hold, cfsk_put);
72035 - dbfs_atomic_inc(&cnt.num_connect_resp);
72036 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72037 cf_sk->sk.sk_state = CAIF_CONNECTED;
72038 set_tx_flow_on(cf_sk);
72039 cf_sk->sk.sk_state_change(&cf_sk->sk);
72040 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72041
72042 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72043 /* Connect request failed */
72044 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72045 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72046 cf_sk->sk.sk_err = ECONNREFUSED;
72047 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72048 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72049 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72050
72051 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72052 /* Modem has closed this connection, or device is down. */
72053 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72054 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72055 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72056 cf_sk->sk.sk_err = ECONNRESET;
72057 set_rx_flow_on(cf_sk);
72058 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72059 return;
72060
72061 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72062 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72063 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72064 set_rx_flow_on(cf_sk);
72065 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72066 }
72067 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72068 /*ifindex = id of the interface.*/
72069 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72070
72071 - dbfs_atomic_inc(&cnt.num_connect_req);
72072 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72073 cf_sk->layer.receive = caif_sktrecv_cb;
72074
72075 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72076 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72077 spin_unlock_bh(&sk->sk_receive_queue.lock);
72078 sock->sk = NULL;
72079
72080 - dbfs_atomic_inc(&cnt.num_disconnect);
72081 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72082
72083 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72084 if (cf_sk->debugfs_socket_dir != NULL)
72085 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72086 cf_sk->conn_req.protocol = protocol;
72087 /* Increase the number of sockets created. */
72088 dbfs_atomic_inc(&cnt.caif_nr_socks);
72089 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72090 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72091 #ifdef CONFIG_DEBUG_FS
72092 if (!IS_ERR(debugfsdir)) {
72093
72094 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72095 index 5cf5222..6f704ad 100644
72096 --- a/net/caif/cfctrl.c
72097 +++ b/net/caif/cfctrl.c
72098 @@ -9,6 +9,7 @@
72099 #include <linux/stddef.h>
72100 #include <linux/spinlock.h>
72101 #include <linux/slab.h>
72102 +#include <linux/sched.h>
72103 #include <net/caif/caif_layer.h>
72104 #include <net/caif/cfpkt.h>
72105 #include <net/caif/cfctrl.h>
72106 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72107 memset(&dev_info, 0, sizeof(dev_info));
72108 dev_info.id = 0xff;
72109 cfsrvl_init(&this->serv, 0, &dev_info, false);
72110 - atomic_set(&this->req_seq_no, 1);
72111 - atomic_set(&this->rsp_seq_no, 1);
72112 + atomic_set_unchecked(&this->req_seq_no, 1);
72113 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72114 this->serv.layer.receive = cfctrl_recv;
72115 sprintf(this->serv.layer.name, "ctrl");
72116 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72117 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72118 struct cfctrl_request_info *req)
72119 {
72120 spin_lock_bh(&ctrl->info_list_lock);
72121 - atomic_inc(&ctrl->req_seq_no);
72122 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72123 + atomic_inc_unchecked(&ctrl->req_seq_no);
72124 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72125 list_add_tail(&req->list, &ctrl->list);
72126 spin_unlock_bh(&ctrl->info_list_lock);
72127 }
72128 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72129 if (p != first)
72130 pr_warn("Requests are not received in order\n");
72131
72132 - atomic_set(&ctrl->rsp_seq_no,
72133 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72134 p->sequence_no);
72135 list_del(&p->list);
72136 goto out;
72137 diff --git a/net/can/gw.c b/net/can/gw.c
72138 index 3d79b12..8de85fa 100644
72139 --- a/net/can/gw.c
72140 +++ b/net/can/gw.c
72141 @@ -96,7 +96,7 @@ struct cf_mod {
72142 struct {
72143 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72144 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72145 - } csumfunc;
72146 + } __no_const csumfunc;
72147 };
72148
72149
72150 diff --git a/net/compat.c b/net/compat.c
72151 index 6def90e..c6992fa 100644
72152 --- a/net/compat.c
72153 +++ b/net/compat.c
72154 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72155 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72156 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72157 return -EFAULT;
72158 - kmsg->msg_name = compat_ptr(tmp1);
72159 - kmsg->msg_iov = compat_ptr(tmp2);
72160 - kmsg->msg_control = compat_ptr(tmp3);
72161 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72162 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72163 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72164 return 0;
72165 }
72166
72167 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72168
72169 if (kern_msg->msg_namelen) {
72170 if (mode == VERIFY_READ) {
72171 - int err = move_addr_to_kernel(kern_msg->msg_name,
72172 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72173 kern_msg->msg_namelen,
72174 kern_address);
72175 if (err < 0)
72176 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72177 kern_msg->msg_name = NULL;
72178
72179 tot_len = iov_from_user_compat_to_kern(kern_iov,
72180 - (struct compat_iovec __user *)kern_msg->msg_iov,
72181 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72182 kern_msg->msg_iovlen);
72183 if (tot_len >= 0)
72184 kern_msg->msg_iov = kern_iov;
72185 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72186
72187 #define CMSG_COMPAT_FIRSTHDR(msg) \
72188 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72189 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72190 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72191 (struct compat_cmsghdr __user *)NULL)
72192
72193 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72194 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72195 (ucmlen) <= (unsigned long) \
72196 ((mhdr)->msg_controllen - \
72197 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72198 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72199
72200 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72201 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72202 {
72203 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72204 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72205 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72206 msg->msg_controllen)
72207 return NULL;
72208 return (struct compat_cmsghdr __user *)ptr;
72209 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72210 {
72211 struct compat_timeval ctv;
72212 struct compat_timespec cts[3];
72213 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72214 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72215 struct compat_cmsghdr cmhdr;
72216 int cmlen;
72217
72218 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72219
72220 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72221 {
72222 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72223 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72224 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72225 int fdnum = scm->fp->count;
72226 struct file **fp = scm->fp->fp;
72227 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72228 return -EFAULT;
72229 old_fs = get_fs();
72230 set_fs(KERNEL_DS);
72231 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72232 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72233 set_fs(old_fs);
72234
72235 return err;
72236 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72237 len = sizeof(ktime);
72238 old_fs = get_fs();
72239 set_fs(KERNEL_DS);
72240 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72241 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72242 set_fs(old_fs);
72243
72244 if (!err) {
72245 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72246 case MCAST_JOIN_GROUP:
72247 case MCAST_LEAVE_GROUP:
72248 {
72249 - struct compat_group_req __user *gr32 = (void *)optval;
72250 + struct compat_group_req __user *gr32 = (void __user *)optval;
72251 struct group_req __user *kgr =
72252 compat_alloc_user_space(sizeof(struct group_req));
72253 u32 interface;
72254 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72255 case MCAST_BLOCK_SOURCE:
72256 case MCAST_UNBLOCK_SOURCE:
72257 {
72258 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72259 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72260 struct group_source_req __user *kgsr = compat_alloc_user_space(
72261 sizeof(struct group_source_req));
72262 u32 interface;
72263 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72264 }
72265 case MCAST_MSFILTER:
72266 {
72267 - struct compat_group_filter __user *gf32 = (void *)optval;
72268 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72269 struct group_filter __user *kgf;
72270 u32 interface, fmode, numsrc;
72271
72272 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72273 char __user *optval, int __user *optlen,
72274 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72275 {
72276 - struct compat_group_filter __user *gf32 = (void *)optval;
72277 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72278 struct group_filter __user *kgf;
72279 int __user *koptlen;
72280 u32 interface, fmode, numsrc;
72281 diff --git a/net/core/datagram.c b/net/core/datagram.c
72282 index 68bbf9f..5ef0d12 100644
72283 --- a/net/core/datagram.c
72284 +++ b/net/core/datagram.c
72285 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72286 }
72287
72288 kfree_skb(skb);
72289 - atomic_inc(&sk->sk_drops);
72290 + atomic_inc_unchecked(&sk->sk_drops);
72291 sk_mem_reclaim_partial(sk);
72292
72293 return err;
72294 diff --git a/net/core/dev.c b/net/core/dev.c
72295 index 5a13edf..a6f2bd2 100644
72296 --- a/net/core/dev.c
72297 +++ b/net/core/dev.c
72298 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72299 if (no_module && capable(CAP_NET_ADMIN))
72300 no_module = request_module("netdev-%s", name);
72301 if (no_module && capable(CAP_SYS_MODULE)) {
72302 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72303 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72304 +#else
72305 if (!request_module("%s", name))
72306 pr_err("Loading kernel module for a network device "
72307 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72308 "instead\n", name);
72309 +#endif
72310 }
72311 }
72312 EXPORT_SYMBOL(dev_load);
72313 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72314 {
72315 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72316 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72317 - atomic_long_inc(&dev->rx_dropped);
72318 + atomic_long_inc_unchecked(&dev->rx_dropped);
72319 kfree_skb(skb);
72320 return NET_RX_DROP;
72321 }
72322 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72323 nf_reset(skb);
72324
72325 if (unlikely(!is_skb_forwardable(dev, skb))) {
72326 - atomic_long_inc(&dev->rx_dropped);
72327 + atomic_long_inc_unchecked(&dev->rx_dropped);
72328 kfree_skb(skb);
72329 return NET_RX_DROP;
72330 }
72331 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72332
72333 struct dev_gso_cb {
72334 void (*destructor)(struct sk_buff *skb);
72335 -};
72336 +} __no_const;
72337
72338 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72339
72340 @@ -2970,7 +2974,7 @@ enqueue:
72341
72342 local_irq_restore(flags);
72343
72344 - atomic_long_inc(&skb->dev->rx_dropped);
72345 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72346 kfree_skb(skb);
72347 return NET_RX_DROP;
72348 }
72349 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72350 }
72351 EXPORT_SYMBOL(netif_rx_ni);
72352
72353 -static void net_tx_action(struct softirq_action *h)
72354 +static void net_tx_action(void)
72355 {
72356 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72357
72358 @@ -3333,7 +3337,7 @@ ncls:
72359 if (pt_prev) {
72360 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72361 } else {
72362 - atomic_long_inc(&skb->dev->rx_dropped);
72363 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72364 kfree_skb(skb);
72365 /* Jamal, now you will not able to escape explaining
72366 * me how you were going to use this. :-)
72367 @@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72368 }
72369 EXPORT_SYMBOL(netif_napi_del);
72370
72371 -static void net_rx_action(struct softirq_action *h)
72372 +static void net_rx_action(void)
72373 {
72374 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72375 unsigned long time_limit = jiffies + 2;
72376 @@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72377 } else {
72378 netdev_stats_to_stats64(storage, &dev->stats);
72379 }
72380 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72381 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72382 return storage;
72383 }
72384 EXPORT_SYMBOL(dev_get_stats);
72385 diff --git a/net/core/flow.c b/net/core/flow.c
72386 index e318c7e..168b1d0 100644
72387 --- a/net/core/flow.c
72388 +++ b/net/core/flow.c
72389 @@ -61,7 +61,7 @@ struct flow_cache {
72390 struct timer_list rnd_timer;
72391 };
72392
72393 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72394 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72395 EXPORT_SYMBOL(flow_cache_genid);
72396 static struct flow_cache flow_cache_global;
72397 static struct kmem_cache *flow_cachep __read_mostly;
72398 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72399
72400 static int flow_entry_valid(struct flow_cache_entry *fle)
72401 {
72402 - if (atomic_read(&flow_cache_genid) != fle->genid)
72403 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72404 return 0;
72405 if (fle->object && !fle->object->ops->check(fle->object))
72406 return 0;
72407 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72408 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72409 fcp->hash_count++;
72410 }
72411 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72412 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72413 flo = fle->object;
72414 if (!flo)
72415 goto ret_object;
72416 @@ -280,7 +280,7 @@ nocache:
72417 }
72418 flo = resolver(net, key, family, dir, flo, ctx);
72419 if (fle) {
72420 - fle->genid = atomic_read(&flow_cache_genid);
72421 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72422 if (!IS_ERR(flo))
72423 fle->object = flo;
72424 else
72425 diff --git a/net/core/iovec.c b/net/core/iovec.c
72426 index c40f27e..7f49254 100644
72427 --- a/net/core/iovec.c
72428 +++ b/net/core/iovec.c
72429 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72430 if (m->msg_namelen) {
72431 if (mode == VERIFY_READ) {
72432 void __user *namep;
72433 - namep = (void __user __force *) m->msg_name;
72434 + namep = (void __force_user *) m->msg_name;
72435 err = move_addr_to_kernel(namep, m->msg_namelen,
72436 address);
72437 if (err < 0)
72438 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72439 }
72440
72441 size = m->msg_iovlen * sizeof(struct iovec);
72442 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72443 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72444 return -EFAULT;
72445
72446 m->msg_iov = iov;
72447 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72448 index 9083e82..1673203 100644
72449 --- a/net/core/rtnetlink.c
72450 +++ b/net/core/rtnetlink.c
72451 @@ -57,7 +57,7 @@ struct rtnl_link {
72452 rtnl_doit_func doit;
72453 rtnl_dumpit_func dumpit;
72454 rtnl_calcit_func calcit;
72455 -};
72456 +} __no_const;
72457
72458 static DEFINE_MUTEX(rtnl_mutex);
72459 static u16 min_ifinfo_dump_size;
72460 diff --git a/net/core/scm.c b/net/core/scm.c
72461 index ff52ad0..aff1c0f 100644
72462 --- a/net/core/scm.c
72463 +++ b/net/core/scm.c
72464 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72465 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72466 {
72467 struct cmsghdr __user *cm
72468 - = (__force struct cmsghdr __user *)msg->msg_control;
72469 + = (struct cmsghdr __force_user *)msg->msg_control;
72470 struct cmsghdr cmhdr;
72471 int cmlen = CMSG_LEN(len);
72472 int err;
72473 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72474 err = -EFAULT;
72475 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72476 goto out;
72477 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72478 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72479 goto out;
72480 cmlen = CMSG_SPACE(len);
72481 if (msg->msg_controllen < cmlen)
72482 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72483 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72484 {
72485 struct cmsghdr __user *cm
72486 - = (__force struct cmsghdr __user*)msg->msg_control;
72487 + = (struct cmsghdr __force_user *)msg->msg_control;
72488
72489 int fdmax = 0;
72490 int fdnum = scm->fp->count;
72491 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72492 if (fdnum < fdmax)
72493 fdmax = fdnum;
72494
72495 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72496 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72497 i++, cmfptr++)
72498 {
72499 int new_fd;
72500 diff --git a/net/core/sock.c b/net/core/sock.c
72501 index b23f174..b9a0d26 100644
72502 --- a/net/core/sock.c
72503 +++ b/net/core/sock.c
72504 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72505 struct sk_buff_head *list = &sk->sk_receive_queue;
72506
72507 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72508 - atomic_inc(&sk->sk_drops);
72509 + atomic_inc_unchecked(&sk->sk_drops);
72510 trace_sock_rcvqueue_full(sk, skb);
72511 return -ENOMEM;
72512 }
72513 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72514 return err;
72515
72516 if (!sk_rmem_schedule(sk, skb->truesize)) {
72517 - atomic_inc(&sk->sk_drops);
72518 + atomic_inc_unchecked(&sk->sk_drops);
72519 return -ENOBUFS;
72520 }
72521
72522 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72523 skb_dst_force(skb);
72524
72525 spin_lock_irqsave(&list->lock, flags);
72526 - skb->dropcount = atomic_read(&sk->sk_drops);
72527 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72528 __skb_queue_tail(list, skb);
72529 spin_unlock_irqrestore(&list->lock, flags);
72530
72531 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72532 skb->dev = NULL;
72533
72534 if (sk_rcvqueues_full(sk, skb)) {
72535 - atomic_inc(&sk->sk_drops);
72536 + atomic_inc_unchecked(&sk->sk_drops);
72537 goto discard_and_relse;
72538 }
72539 if (nested)
72540 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72541 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72542 } else if (sk_add_backlog(sk, skb)) {
72543 bh_unlock_sock(sk);
72544 - atomic_inc(&sk->sk_drops);
72545 + atomic_inc_unchecked(&sk->sk_drops);
72546 goto discard_and_relse;
72547 }
72548
72549 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72550 if (len > sizeof(peercred))
72551 len = sizeof(peercred);
72552 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72553 - if (copy_to_user(optval, &peercred, len))
72554 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72555 return -EFAULT;
72556 goto lenout;
72557 }
72558 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72559 return -ENOTCONN;
72560 if (lv < len)
72561 return -EINVAL;
72562 - if (copy_to_user(optval, address, len))
72563 + if (len > sizeof(address) || copy_to_user(optval, address, len))
72564 return -EFAULT;
72565 goto lenout;
72566 }
72567 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72568
72569 if (len > lv)
72570 len = lv;
72571 - if (copy_to_user(optval, &v, len))
72572 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
72573 return -EFAULT;
72574 lenout:
72575 if (put_user(len, optlen))
72576 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72577 */
72578 smp_wmb();
72579 atomic_set(&sk->sk_refcnt, 1);
72580 - atomic_set(&sk->sk_drops, 0);
72581 + atomic_set_unchecked(&sk->sk_drops, 0);
72582 }
72583 EXPORT_SYMBOL(sock_init_data);
72584
72585 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72586 index 02e75d1..9a57a7c 100644
72587 --- a/net/decnet/sysctl_net_decnet.c
72588 +++ b/net/decnet/sysctl_net_decnet.c
72589 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72590
72591 if (len > *lenp) len = *lenp;
72592
72593 - if (copy_to_user(buffer, addr, len))
72594 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
72595 return -EFAULT;
72596
72597 *lenp = len;
72598 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72599
72600 if (len > *lenp) len = *lenp;
72601
72602 - if (copy_to_user(buffer, devname, len))
72603 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
72604 return -EFAULT;
72605
72606 *lenp = len;
72607 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72608 index 39a2d29..f39c0fe 100644
72609 --- a/net/econet/Kconfig
72610 +++ b/net/econet/Kconfig
72611 @@ -4,7 +4,7 @@
72612
72613 config ECONET
72614 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72615 - depends on EXPERIMENTAL && INET
72616 + depends on EXPERIMENTAL && INET && BROKEN
72617 ---help---
72618 Econet is a fairly old and slow networking protocol mainly used by
72619 Acorn computers to access file and print servers. It uses native
72620 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72621 index 92fc5f6..b790d91 100644
72622 --- a/net/ipv4/fib_frontend.c
72623 +++ b/net/ipv4/fib_frontend.c
72624 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72625 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72626 fib_sync_up(dev);
72627 #endif
72628 - atomic_inc(&net->ipv4.dev_addr_genid);
72629 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72630 rt_cache_flush(dev_net(dev), -1);
72631 break;
72632 case NETDEV_DOWN:
72633 fib_del_ifaddr(ifa, NULL);
72634 - atomic_inc(&net->ipv4.dev_addr_genid);
72635 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72636 if (ifa->ifa_dev->ifa_list == NULL) {
72637 /* Last address was deleted from this interface.
72638 * Disable IP.
72639 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72640 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72641 fib_sync_up(dev);
72642 #endif
72643 - atomic_inc(&net->ipv4.dev_addr_genid);
72644 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72645 rt_cache_flush(dev_net(dev), -1);
72646 break;
72647 case NETDEV_DOWN:
72648 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72649 index 80106d8..232e898 100644
72650 --- a/net/ipv4/fib_semantics.c
72651 +++ b/net/ipv4/fib_semantics.c
72652 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72653 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72654 nh->nh_gw,
72655 nh->nh_parent->fib_scope);
72656 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72657 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72658
72659 return nh->nh_saddr;
72660 }
72661 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72662 index ccee270..db23c3c 100644
72663 --- a/net/ipv4/inet_diag.c
72664 +++ b/net/ipv4/inet_diag.c
72665 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72666 r->idiag_retrans = 0;
72667
72668 r->id.idiag_if = sk->sk_bound_dev_if;
72669 +
72670 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72671 + r->id.idiag_cookie[0] = 0;
72672 + r->id.idiag_cookie[1] = 0;
72673 +#else
72674 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72675 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72676 +#endif
72677
72678 r->id.idiag_sport = inet->inet_sport;
72679 r->id.idiag_dport = inet->inet_dport;
72680 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72681 r->idiag_family = tw->tw_family;
72682 r->idiag_retrans = 0;
72683 r->id.idiag_if = tw->tw_bound_dev_if;
72684 +
72685 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72686 + r->id.idiag_cookie[0] = 0;
72687 + r->id.idiag_cookie[1] = 0;
72688 +#else
72689 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72690 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72691 +#endif
72692 +
72693 r->id.idiag_sport = tw->tw_sport;
72694 r->id.idiag_dport = tw->tw_dport;
72695 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72696 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72697 if (sk == NULL)
72698 goto unlock;
72699
72700 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72701 err = -ESTALE;
72702 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72703 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72704 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72705 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72706 goto out;
72707 +#endif
72708
72709 err = -ENOMEM;
72710 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72711 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72712 r->idiag_retrans = req->retrans;
72713
72714 r->id.idiag_if = sk->sk_bound_dev_if;
72715 +
72716 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72717 + r->id.idiag_cookie[0] = 0;
72718 + r->id.idiag_cookie[1] = 0;
72719 +#else
72720 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72721 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72722 +#endif
72723
72724 tmo = req->expires - jiffies;
72725 if (tmo < 0)
72726 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72727 index 984ec65..97ac518 100644
72728 --- a/net/ipv4/inet_hashtables.c
72729 +++ b/net/ipv4/inet_hashtables.c
72730 @@ -18,12 +18,15 @@
72731 #include <linux/sched.h>
72732 #include <linux/slab.h>
72733 #include <linux/wait.h>
72734 +#include <linux/security.h>
72735
72736 #include <net/inet_connection_sock.h>
72737 #include <net/inet_hashtables.h>
72738 #include <net/secure_seq.h>
72739 #include <net/ip.h>
72740
72741 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72742 +
72743 /*
72744 * Allocate and initialize a new local port bind bucket.
72745 * The bindhash mutex for snum's hash chain must be held here.
72746 @@ -530,6 +533,8 @@ ok:
72747 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72748 spin_unlock(&head->lock);
72749
72750 + gr_update_task_in_ip_table(current, inet_sk(sk));
72751 +
72752 if (tw) {
72753 inet_twsk_deschedule(tw, death_row);
72754 while (twrefcnt) {
72755 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72756 index 86f13c67..59a35b5 100644
72757 --- a/net/ipv4/inetpeer.c
72758 +++ b/net/ipv4/inetpeer.c
72759 @@ -436,8 +436,8 @@ relookup:
72760 if (p) {
72761 p->daddr = *daddr;
72762 atomic_set(&p->refcnt, 1);
72763 - atomic_set(&p->rid, 0);
72764 - atomic_set(&p->ip_id_count,
72765 + atomic_set_unchecked(&p->rid, 0);
72766 + atomic_set_unchecked(&p->ip_id_count,
72767 (daddr->family == AF_INET) ?
72768 secure_ip_id(daddr->addr.a4) :
72769 secure_ipv6_id(daddr->addr.a6));
72770 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72771 index fdaabf2..0ec3205 100644
72772 --- a/net/ipv4/ip_fragment.c
72773 +++ b/net/ipv4/ip_fragment.c
72774 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72775 return 0;
72776
72777 start = qp->rid;
72778 - end = atomic_inc_return(&peer->rid);
72779 + end = atomic_inc_return_unchecked(&peer->rid);
72780 qp->rid = end;
72781
72782 rc = qp->q.fragments && (end - start) > max;
72783 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72784 index 09ff51b..d3968eb 100644
72785 --- a/net/ipv4/ip_sockglue.c
72786 +++ b/net/ipv4/ip_sockglue.c
72787 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72788 len = min_t(unsigned int, len, opt->optlen);
72789 if (put_user(len, optlen))
72790 return -EFAULT;
72791 - if (copy_to_user(optval, opt->__data, len))
72792 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72793 + copy_to_user(optval, opt->__data, len))
72794 return -EFAULT;
72795 return 0;
72796 }
72797 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72798 if (sk->sk_type != SOCK_STREAM)
72799 return -ENOPROTOOPT;
72800
72801 - msg.msg_control = optval;
72802 + msg.msg_control = (void __force_kernel *)optval;
72803 msg.msg_controllen = len;
72804 msg.msg_flags = flags;
72805
72806 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72807 index 99ec116..c5628fe 100644
72808 --- a/net/ipv4/ipconfig.c
72809 +++ b/net/ipv4/ipconfig.c
72810 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72811
72812 mm_segment_t oldfs = get_fs();
72813 set_fs(get_ds());
72814 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72815 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72816 set_fs(oldfs);
72817 return res;
72818 }
72819 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72820
72821 mm_segment_t oldfs = get_fs();
72822 set_fs(get_ds());
72823 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72824 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72825 set_fs(oldfs);
72826 return res;
72827 }
72828 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72829
72830 mm_segment_t oldfs = get_fs();
72831 set_fs(get_ds());
72832 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72833 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72834 set_fs(oldfs);
72835 return res;
72836 }
72837 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72838 index 2133c30..5c4b40b 100644
72839 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
72840 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72841 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
72842
72843 *len = 0;
72844
72845 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72846 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72847 if (*octets == NULL)
72848 return 0;
72849
72850 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
72851 index 43d4c3b..1914409 100644
72852 --- a/net/ipv4/ping.c
72853 +++ b/net/ipv4/ping.c
72854 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
72855 sk_rmem_alloc_get(sp),
72856 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72857 atomic_read(&sp->sk_refcnt), sp,
72858 - atomic_read(&sp->sk_drops), len);
72859 + atomic_read_unchecked(&sp->sk_drops), len);
72860 }
72861
72862 static int ping_seq_show(struct seq_file *seq, void *v)
72863 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
72864 index 007e2eb..85a18a0 100644
72865 --- a/net/ipv4/raw.c
72866 +++ b/net/ipv4/raw.c
72867 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
72868 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72869 {
72870 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72871 - atomic_inc(&sk->sk_drops);
72872 + atomic_inc_unchecked(&sk->sk_drops);
72873 kfree_skb(skb);
72874 return NET_RX_DROP;
72875 }
72876 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
72877
72878 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72879 {
72880 + struct icmp_filter filter;
72881 +
72882 if (optlen > sizeof(struct icmp_filter))
72883 optlen = sizeof(struct icmp_filter);
72884 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72885 + if (copy_from_user(&filter, optval, optlen))
72886 return -EFAULT;
72887 + raw_sk(sk)->filter = filter;
72888 return 0;
72889 }
72890
72891 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72892 {
72893 int len, ret = -EFAULT;
72894 + struct icmp_filter filter;
72895
72896 if (get_user(len, optlen))
72897 goto out;
72898 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
72899 if (len > sizeof(struct icmp_filter))
72900 len = sizeof(struct icmp_filter);
72901 ret = -EFAULT;
72902 - if (put_user(len, optlen) ||
72903 - copy_to_user(optval, &raw_sk(sk)->filter, len))
72904 + filter = raw_sk(sk)->filter;
72905 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72906 goto out;
72907 ret = 0;
72908 out: return ret;
72909 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
72910 sk_wmem_alloc_get(sp),
72911 sk_rmem_alloc_get(sp),
72912 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72913 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72914 + atomic_read(&sp->sk_refcnt),
72915 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72916 + NULL,
72917 +#else
72918 + sp,
72919 +#endif
72920 + atomic_read_unchecked(&sp->sk_drops));
72921 }
72922
72923 static int raw_seq_show(struct seq_file *seq, void *v)
72924 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
72925 index 94cdbc5..0cb0063 100644
72926 --- a/net/ipv4/route.c
72927 +++ b/net/ipv4/route.c
72928 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
72929
72930 static inline int rt_genid(struct net *net)
72931 {
72932 - return atomic_read(&net->ipv4.rt_genid);
72933 + return atomic_read_unchecked(&net->ipv4.rt_genid);
72934 }
72935
72936 #ifdef CONFIG_PROC_FS
72937 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
72938 unsigned char shuffle;
72939
72940 get_random_bytes(&shuffle, sizeof(shuffle));
72941 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72942 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72943 redirect_genid++;
72944 }
72945
72946 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
72947 error = rt->dst.error;
72948 if (peer) {
72949 inet_peer_refcheck(rt->peer);
72950 - id = atomic_read(&peer->ip_id_count) & 0xffff;
72951 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72952 if (peer->tcp_ts_stamp) {
72953 ts = peer->tcp_ts;
72954 tsage = get_seconds() - peer->tcp_ts_stamp;
72955 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
72956 index c89e354..8bd55c8 100644
72957 --- a/net/ipv4/tcp_ipv4.c
72958 +++ b/net/ipv4/tcp_ipv4.c
72959 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72960 int sysctl_tcp_low_latency __read_mostly;
72961 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72962
72963 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72964 +extern int grsec_enable_blackhole;
72965 +#endif
72966
72967 #ifdef CONFIG_TCP_MD5SIG
72968 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72969 @@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
72970 return 0;
72971
72972 reset:
72973 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72974 + if (!grsec_enable_blackhole)
72975 +#endif
72976 tcp_v4_send_reset(rsk, skb);
72977 discard:
72978 kfree_skb(skb);
72979 @@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72980 TCP_SKB_CB(skb)->sacked = 0;
72981
72982 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72983 - if (!sk)
72984 + if (!sk) {
72985 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72986 + ret = 1;
72987 +#endif
72988 goto no_tcp_socket;
72989 -
72990 + }
72991 process:
72992 - if (sk->sk_state == TCP_TIME_WAIT)
72993 + if (sk->sk_state == TCP_TIME_WAIT) {
72994 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72995 + ret = 2;
72996 +#endif
72997 goto do_time_wait;
72998 + }
72999
73000 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73001 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73002 @@ -1744,6 +1757,10 @@ no_tcp_socket:
73003 bad_packet:
73004 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73005 } else {
73006 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73007 + if (!grsec_enable_blackhole || (ret == 1 &&
73008 + (skb->dev->flags & IFF_LOOPBACK)))
73009 +#endif
73010 tcp_v4_send_reset(NULL, skb);
73011 }
73012
73013 @@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73014 0, /* non standard timer */
73015 0, /* open_requests have no inode */
73016 atomic_read(&sk->sk_refcnt),
73017 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73018 + NULL,
73019 +#else
73020 req,
73021 +#endif
73022 len);
73023 }
73024
73025 @@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73026 sock_i_uid(sk),
73027 icsk->icsk_probes_out,
73028 sock_i_ino(sk),
73029 - atomic_read(&sk->sk_refcnt), sk,
73030 + atomic_read(&sk->sk_refcnt),
73031 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73032 + NULL,
73033 +#else
73034 + sk,
73035 +#endif
73036 jiffies_to_clock_t(icsk->icsk_rto),
73037 jiffies_to_clock_t(icsk->icsk_ack.ato),
73038 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73039 @@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73040 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73041 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73042 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73043 - atomic_read(&tw->tw_refcnt), tw, len);
73044 + atomic_read(&tw->tw_refcnt),
73045 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73046 + NULL,
73047 +#else
73048 + tw,
73049 +#endif
73050 + len);
73051 }
73052
73053 #define TMPSZ 150
73054 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73055 index 66363b6..b0654a3 100644
73056 --- a/net/ipv4/tcp_minisocks.c
73057 +++ b/net/ipv4/tcp_minisocks.c
73058 @@ -27,6 +27,10 @@
73059 #include <net/inet_common.h>
73060 #include <net/xfrm.h>
73061
73062 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73063 +extern int grsec_enable_blackhole;
73064 +#endif
73065 +
73066 int sysctl_tcp_syncookies __read_mostly = 1;
73067 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73068
73069 @@ -751,6 +755,10 @@ listen_overflow:
73070
73071 embryonic_reset:
73072 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73073 +
73074 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73075 + if (!grsec_enable_blackhole)
73076 +#endif
73077 if (!(flg & TCP_FLAG_RST))
73078 req->rsk_ops->send_reset(sk, skb);
73079
73080 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73081 index 85ee7eb..53277ab 100644
73082 --- a/net/ipv4/tcp_probe.c
73083 +++ b/net/ipv4/tcp_probe.c
73084 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73085 if (cnt + width >= len)
73086 break;
73087
73088 - if (copy_to_user(buf + cnt, tbuf, width))
73089 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73090 return -EFAULT;
73091 cnt += width;
73092 }
73093 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73094 index 2e0f0af..e2948bf 100644
73095 --- a/net/ipv4/tcp_timer.c
73096 +++ b/net/ipv4/tcp_timer.c
73097 @@ -22,6 +22,10 @@
73098 #include <linux/gfp.h>
73099 #include <net/tcp.h>
73100
73101 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73102 +extern int grsec_lastack_retries;
73103 +#endif
73104 +
73105 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73106 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73107 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73108 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73109 }
73110 }
73111
73112 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73113 + if ((sk->sk_state == TCP_LAST_ACK) &&
73114 + (grsec_lastack_retries > 0) &&
73115 + (grsec_lastack_retries < retry_until))
73116 + retry_until = grsec_lastack_retries;
73117 +#endif
73118 +
73119 if (retransmits_timed_out(sk, retry_until,
73120 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73121 /* Has it gone just too far? */
73122 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73123 index 5a65eea..bd913a1 100644
73124 --- a/net/ipv4/udp.c
73125 +++ b/net/ipv4/udp.c
73126 @@ -86,6 +86,7 @@
73127 #include <linux/types.h>
73128 #include <linux/fcntl.h>
73129 #include <linux/module.h>
73130 +#include <linux/security.h>
73131 #include <linux/socket.h>
73132 #include <linux/sockios.h>
73133 #include <linux/igmp.h>
73134 @@ -108,6 +109,10 @@
73135 #include <trace/events/udp.h>
73136 #include "udp_impl.h"
73137
73138 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73139 +extern int grsec_enable_blackhole;
73140 +#endif
73141 +
73142 struct udp_table udp_table __read_mostly;
73143 EXPORT_SYMBOL(udp_table);
73144
73145 @@ -565,6 +570,9 @@ found:
73146 return s;
73147 }
73148
73149 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73150 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73151 +
73152 /*
73153 * This routine is called by the ICMP module when it gets some
73154 * sort of error condition. If err < 0 then the socket should
73155 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73156 dport = usin->sin_port;
73157 if (dport == 0)
73158 return -EINVAL;
73159 +
73160 + err = gr_search_udp_sendmsg(sk, usin);
73161 + if (err)
73162 + return err;
73163 } else {
73164 if (sk->sk_state != TCP_ESTABLISHED)
73165 return -EDESTADDRREQ;
73166 +
73167 + err = gr_search_udp_sendmsg(sk, NULL);
73168 + if (err)
73169 + return err;
73170 +
73171 daddr = inet->inet_daddr;
73172 dport = inet->inet_dport;
73173 /* Open fast path for connected socket.
73174 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73175 udp_lib_checksum_complete(skb)) {
73176 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73177 IS_UDPLITE(sk));
73178 - atomic_inc(&sk->sk_drops);
73179 + atomic_inc_unchecked(&sk->sk_drops);
73180 __skb_unlink(skb, rcvq);
73181 __skb_queue_tail(&list_kill, skb);
73182 }
73183 @@ -1185,6 +1202,10 @@ try_again:
73184 if (!skb)
73185 goto out;
73186
73187 + err = gr_search_udp_recvmsg(sk, skb);
73188 + if (err)
73189 + goto out_free;
73190 +
73191 ulen = skb->len - sizeof(struct udphdr);
73192 copied = len;
73193 if (copied > ulen)
73194 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73195
73196 drop:
73197 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73198 - atomic_inc(&sk->sk_drops);
73199 + atomic_inc_unchecked(&sk->sk_drops);
73200 kfree_skb(skb);
73201 return -1;
73202 }
73203 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73204 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73205
73206 if (!skb1) {
73207 - atomic_inc(&sk->sk_drops);
73208 + atomic_inc_unchecked(&sk->sk_drops);
73209 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73210 IS_UDPLITE(sk));
73211 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73212 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73213 goto csum_error;
73214
73215 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73216 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73217 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73218 +#endif
73219 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73220
73221 /*
73222 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73223 sk_wmem_alloc_get(sp),
73224 sk_rmem_alloc_get(sp),
73225 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73226 - atomic_read(&sp->sk_refcnt), sp,
73227 - atomic_read(&sp->sk_drops), len);
73228 + atomic_read(&sp->sk_refcnt),
73229 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73230 + NULL,
73231 +#else
73232 + sp,
73233 +#endif
73234 + atomic_read_unchecked(&sp->sk_drops), len);
73235 }
73236
73237 int udp4_seq_show(struct seq_file *seq, void *v)
73238 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73239 index 836c4ea..cbb74dc 100644
73240 --- a/net/ipv6/addrconf.c
73241 +++ b/net/ipv6/addrconf.c
73242 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73243 p.iph.ihl = 5;
73244 p.iph.protocol = IPPROTO_IPV6;
73245 p.iph.ttl = 64;
73246 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73247 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73248
73249 if (ops->ndo_do_ioctl) {
73250 mm_segment_t oldfs = get_fs();
73251 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73252 index 1567fb1..29af910 100644
73253 --- a/net/ipv6/inet6_connection_sock.c
73254 +++ b/net/ipv6/inet6_connection_sock.c
73255 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73256 #ifdef CONFIG_XFRM
73257 {
73258 struct rt6_info *rt = (struct rt6_info *)dst;
73259 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73260 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73261 }
73262 #endif
73263 }
73264 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73265 #ifdef CONFIG_XFRM
73266 if (dst) {
73267 struct rt6_info *rt = (struct rt6_info *)dst;
73268 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73269 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73270 __sk_dst_reset(sk);
73271 dst = NULL;
73272 }
73273 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73274 index 26cb08c..8af9877 100644
73275 --- a/net/ipv6/ipv6_sockglue.c
73276 +++ b/net/ipv6/ipv6_sockglue.c
73277 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73278 if (sk->sk_type != SOCK_STREAM)
73279 return -ENOPROTOOPT;
73280
73281 - msg.msg_control = optval;
73282 + msg.msg_control = (void __force_kernel *)optval;
73283 msg.msg_controllen = len;
73284 msg.msg_flags = flags;
73285
73286 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73287 index 361ebf3..d5628fb 100644
73288 --- a/net/ipv6/raw.c
73289 +++ b/net/ipv6/raw.c
73290 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73291 {
73292 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73293 skb_checksum_complete(skb)) {
73294 - atomic_inc(&sk->sk_drops);
73295 + atomic_inc_unchecked(&sk->sk_drops);
73296 kfree_skb(skb);
73297 return NET_RX_DROP;
73298 }
73299 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73300 struct raw6_sock *rp = raw6_sk(sk);
73301
73302 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73303 - atomic_inc(&sk->sk_drops);
73304 + atomic_inc_unchecked(&sk->sk_drops);
73305 kfree_skb(skb);
73306 return NET_RX_DROP;
73307 }
73308 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73309
73310 if (inet->hdrincl) {
73311 if (skb_checksum_complete(skb)) {
73312 - atomic_inc(&sk->sk_drops);
73313 + atomic_inc_unchecked(&sk->sk_drops);
73314 kfree_skb(skb);
73315 return NET_RX_DROP;
73316 }
73317 @@ -601,7 +601,7 @@ out:
73318 return err;
73319 }
73320
73321 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73322 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73323 struct flowi6 *fl6, struct dst_entry **dstp,
73324 unsigned int flags)
73325 {
73326 @@ -909,12 +909,15 @@ do_confirm:
73327 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73328 char __user *optval, int optlen)
73329 {
73330 + struct icmp6_filter filter;
73331 +
73332 switch (optname) {
73333 case ICMPV6_FILTER:
73334 if (optlen > sizeof(struct icmp6_filter))
73335 optlen = sizeof(struct icmp6_filter);
73336 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73337 + if (copy_from_user(&filter, optval, optlen))
73338 return -EFAULT;
73339 + raw6_sk(sk)->filter = filter;
73340 return 0;
73341 default:
73342 return -ENOPROTOOPT;
73343 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73344 char __user *optval, int __user *optlen)
73345 {
73346 int len;
73347 + struct icmp6_filter filter;
73348
73349 switch (optname) {
73350 case ICMPV6_FILTER:
73351 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73352 len = sizeof(struct icmp6_filter);
73353 if (put_user(len, optlen))
73354 return -EFAULT;
73355 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73356 + filter = raw6_sk(sk)->filter;
73357 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73358 return -EFAULT;
73359 return 0;
73360 default:
73361 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73362 0, 0L, 0,
73363 sock_i_uid(sp), 0,
73364 sock_i_ino(sp),
73365 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73366 + atomic_read(&sp->sk_refcnt),
73367 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73368 + NULL,
73369 +#else
73370 + sp,
73371 +#endif
73372 + atomic_read_unchecked(&sp->sk_drops));
73373 }
73374
73375 static int raw6_seq_show(struct seq_file *seq, void *v)
73376 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73377 index b859e4a..f9d1589 100644
73378 --- a/net/ipv6/tcp_ipv6.c
73379 +++ b/net/ipv6/tcp_ipv6.c
73380 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73381 }
73382 #endif
73383
73384 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73385 +extern int grsec_enable_blackhole;
73386 +#endif
73387 +
73388 static void tcp_v6_hash(struct sock *sk)
73389 {
73390 if (sk->sk_state != TCP_CLOSE) {
73391 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73392 return 0;
73393
73394 reset:
73395 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73396 + if (!grsec_enable_blackhole)
73397 +#endif
73398 tcp_v6_send_reset(sk, skb);
73399 discard:
73400 if (opt_skb)
73401 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73402 TCP_SKB_CB(skb)->sacked = 0;
73403
73404 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73405 - if (!sk)
73406 + if (!sk) {
73407 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73408 + ret = 1;
73409 +#endif
73410 goto no_tcp_socket;
73411 + }
73412
73413 process:
73414 - if (sk->sk_state == TCP_TIME_WAIT)
73415 + if (sk->sk_state == TCP_TIME_WAIT) {
73416 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73417 + ret = 2;
73418 +#endif
73419 goto do_time_wait;
73420 + }
73421
73422 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73423 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73424 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73425 bad_packet:
73426 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73427 } else {
73428 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73429 + if (!grsec_enable_blackhole || (ret == 1 &&
73430 + (skb->dev->flags & IFF_LOOPBACK)))
73431 +#endif
73432 tcp_v6_send_reset(NULL, skb);
73433 }
73434
73435 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73436 uid,
73437 0, /* non standard timer */
73438 0, /* open_requests have no inode */
73439 - 0, req);
73440 + 0,
73441 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73442 + NULL
73443 +#else
73444 + req
73445 +#endif
73446 + );
73447 }
73448
73449 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73450 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73451 sock_i_uid(sp),
73452 icsk->icsk_probes_out,
73453 sock_i_ino(sp),
73454 - atomic_read(&sp->sk_refcnt), sp,
73455 + atomic_read(&sp->sk_refcnt),
73456 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73457 + NULL,
73458 +#else
73459 + sp,
73460 +#endif
73461 jiffies_to_clock_t(icsk->icsk_rto),
73462 jiffies_to_clock_t(icsk->icsk_ack.ato),
73463 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73464 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73465 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73466 tw->tw_substate, 0, 0,
73467 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73468 - atomic_read(&tw->tw_refcnt), tw);
73469 + atomic_read(&tw->tw_refcnt),
73470 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73471 + NULL
73472 +#else
73473 + tw
73474 +#endif
73475 + );
73476 }
73477
73478 static int tcp6_seq_show(struct seq_file *seq, void *v)
73479 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73480 index 8c25419..47a51ae 100644
73481 --- a/net/ipv6/udp.c
73482 +++ b/net/ipv6/udp.c
73483 @@ -50,6 +50,10 @@
73484 #include <linux/seq_file.h>
73485 #include "udp_impl.h"
73486
73487 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73488 +extern int grsec_enable_blackhole;
73489 +#endif
73490 +
73491 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73492 {
73493 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73494 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73495
73496 return 0;
73497 drop:
73498 - atomic_inc(&sk->sk_drops);
73499 + atomic_inc_unchecked(&sk->sk_drops);
73500 drop_no_sk_drops_inc:
73501 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73502 kfree_skb(skb);
73503 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73504 continue;
73505 }
73506 drop:
73507 - atomic_inc(&sk->sk_drops);
73508 + atomic_inc_unchecked(&sk->sk_drops);
73509 UDP6_INC_STATS_BH(sock_net(sk),
73510 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73511 UDP6_INC_STATS_BH(sock_net(sk),
73512 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73513 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73514 proto == IPPROTO_UDPLITE);
73515
73516 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73517 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73518 +#endif
73519 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73520
73521 kfree_skb(skb);
73522 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73523 if (!sock_owned_by_user(sk))
73524 udpv6_queue_rcv_skb(sk, skb);
73525 else if (sk_add_backlog(sk, skb)) {
73526 - atomic_inc(&sk->sk_drops);
73527 + atomic_inc_unchecked(&sk->sk_drops);
73528 bh_unlock_sock(sk);
73529 sock_put(sk);
73530 goto discard;
73531 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73532 0, 0L, 0,
73533 sock_i_uid(sp), 0,
73534 sock_i_ino(sp),
73535 - atomic_read(&sp->sk_refcnt), sp,
73536 - atomic_read(&sp->sk_drops));
73537 + atomic_read(&sp->sk_refcnt),
73538 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73539 + NULL,
73540 +#else
73541 + sp,
73542 +#endif
73543 + atomic_read_unchecked(&sp->sk_drops));
73544 }
73545
73546 int udp6_seq_show(struct seq_file *seq, void *v)
73547 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73548 index 253695d..9481ce8 100644
73549 --- a/net/irda/ircomm/ircomm_tty.c
73550 +++ b/net/irda/ircomm/ircomm_tty.c
73551 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73552 add_wait_queue(&self->open_wait, &wait);
73553
73554 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73555 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73556 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73557
73558 /* As far as I can see, we protect open_count - Jean II */
73559 spin_lock_irqsave(&self->spinlock, flags);
73560 if (!tty_hung_up_p(filp)) {
73561 extra_count = 1;
73562 - self->open_count--;
73563 + local_dec(&self->open_count);
73564 }
73565 spin_unlock_irqrestore(&self->spinlock, flags);
73566 - self->blocked_open++;
73567 + local_inc(&self->blocked_open);
73568
73569 while (1) {
73570 if (tty->termios->c_cflag & CBAUD) {
73571 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73572 }
73573
73574 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73575 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73576 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73577
73578 schedule();
73579 }
73580 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73581 if (extra_count) {
73582 /* ++ is not atomic, so this should be protected - Jean II */
73583 spin_lock_irqsave(&self->spinlock, flags);
73584 - self->open_count++;
73585 + local_inc(&self->open_count);
73586 spin_unlock_irqrestore(&self->spinlock, flags);
73587 }
73588 - self->blocked_open--;
73589 + local_dec(&self->blocked_open);
73590
73591 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73592 - __FILE__,__LINE__, tty->driver->name, self->open_count);
73593 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73594
73595 if (!retval)
73596 self->flags |= ASYNC_NORMAL_ACTIVE;
73597 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73598 }
73599 /* ++ is not atomic, so this should be protected - Jean II */
73600 spin_lock_irqsave(&self->spinlock, flags);
73601 - self->open_count++;
73602 + local_inc(&self->open_count);
73603
73604 tty->driver_data = self;
73605 self->tty = tty;
73606 spin_unlock_irqrestore(&self->spinlock, flags);
73607
73608 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73609 - self->line, self->open_count);
73610 + self->line, local_read(&self->open_count));
73611
73612 /* Not really used by us, but lets do it anyway */
73613 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73614 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73615 return;
73616 }
73617
73618 - if ((tty->count == 1) && (self->open_count != 1)) {
73619 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73620 /*
73621 * Uh, oh. tty->count is 1, which means that the tty
73622 * structure will be freed. state->count should always
73623 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73624 */
73625 IRDA_DEBUG(0, "%s(), bad serial port count; "
73626 "tty->count is 1, state->count is %d\n", __func__ ,
73627 - self->open_count);
73628 - self->open_count = 1;
73629 + local_read(&self->open_count));
73630 + local_set(&self->open_count, 1);
73631 }
73632
73633 - if (--self->open_count < 0) {
73634 + if (local_dec_return(&self->open_count) < 0) {
73635 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73636 - __func__, self->line, self->open_count);
73637 - self->open_count = 0;
73638 + __func__, self->line, local_read(&self->open_count));
73639 + local_set(&self->open_count, 0);
73640 }
73641 - if (self->open_count) {
73642 + if (local_read(&self->open_count)) {
73643 spin_unlock_irqrestore(&self->spinlock, flags);
73644
73645 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73646 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73647 tty->closing = 0;
73648 self->tty = NULL;
73649
73650 - if (self->blocked_open) {
73651 + if (local_read(&self->blocked_open)) {
73652 if (self->close_delay)
73653 schedule_timeout_interruptible(self->close_delay);
73654 wake_up_interruptible(&self->open_wait);
73655 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73656 spin_lock_irqsave(&self->spinlock, flags);
73657 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73658 self->tty = NULL;
73659 - self->open_count = 0;
73660 + local_set(&self->open_count, 0);
73661 spin_unlock_irqrestore(&self->spinlock, flags);
73662
73663 wake_up_interruptible(&self->open_wait);
73664 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73665 seq_putc(m, '\n');
73666
73667 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73668 - seq_printf(m, "Open count: %d\n", self->open_count);
73669 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73670 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73671 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73672
73673 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73674 index 274d150..656a144 100644
73675 --- a/net/iucv/af_iucv.c
73676 +++ b/net/iucv/af_iucv.c
73677 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73678
73679 write_lock_bh(&iucv_sk_list.lock);
73680
73681 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73682 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73683 while (__iucv_get_sock_by_name(name)) {
73684 sprintf(name, "%08x",
73685 - atomic_inc_return(&iucv_sk_list.autobind_name));
73686 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73687 }
73688
73689 write_unlock_bh(&iucv_sk_list.lock);
73690 diff --git a/net/key/af_key.c b/net/key/af_key.c
73691 index 1e733e9..3d73c9f 100644
73692 --- a/net/key/af_key.c
73693 +++ b/net/key/af_key.c
73694 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73695 static u32 get_acqseq(void)
73696 {
73697 u32 res;
73698 - static atomic_t acqseq;
73699 + static atomic_unchecked_t acqseq;
73700
73701 do {
73702 - res = atomic_inc_return(&acqseq);
73703 + res = atomic_inc_return_unchecked(&acqseq);
73704 } while (!res);
73705 return res;
73706 }
73707 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73708 index 73495f1..ad51356 100644
73709 --- a/net/mac80211/ieee80211_i.h
73710 +++ b/net/mac80211/ieee80211_i.h
73711 @@ -27,6 +27,7 @@
73712 #include <net/ieee80211_radiotap.h>
73713 #include <net/cfg80211.h>
73714 #include <net/mac80211.h>
73715 +#include <asm/local.h>
73716 #include "key.h"
73717 #include "sta_info.h"
73718
73719 @@ -764,7 +765,7 @@ struct ieee80211_local {
73720 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73721 spinlock_t queue_stop_reason_lock;
73722
73723 - int open_count;
73724 + local_t open_count;
73725 int monitors, cooked_mntrs;
73726 /* number of interfaces with corresponding FIF_ flags */
73727 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73728 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73729 index 30d7355..e260095 100644
73730 --- a/net/mac80211/iface.c
73731 +++ b/net/mac80211/iface.c
73732 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73733 break;
73734 }
73735
73736 - if (local->open_count == 0) {
73737 + if (local_read(&local->open_count) == 0) {
73738 res = drv_start(local);
73739 if (res)
73740 goto err_del_bss;
73741 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73742 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73743
73744 if (!is_valid_ether_addr(dev->dev_addr)) {
73745 - if (!local->open_count)
73746 + if (!local_read(&local->open_count))
73747 drv_stop(local);
73748 return -EADDRNOTAVAIL;
73749 }
73750 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73751 mutex_unlock(&local->mtx);
73752
73753 if (coming_up)
73754 - local->open_count++;
73755 + local_inc(&local->open_count);
73756
73757 if (hw_reconf_flags) {
73758 ieee80211_hw_config(local, hw_reconf_flags);
73759 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73760 err_del_interface:
73761 drv_remove_interface(local, &sdata->vif);
73762 err_stop:
73763 - if (!local->open_count)
73764 + if (!local_read(&local->open_count))
73765 drv_stop(local);
73766 err_del_bss:
73767 sdata->bss = NULL;
73768 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73769 }
73770
73771 if (going_down)
73772 - local->open_count--;
73773 + local_dec(&local->open_count);
73774
73775 switch (sdata->vif.type) {
73776 case NL80211_IFTYPE_AP_VLAN:
73777 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73778
73779 ieee80211_recalc_ps(local, -1);
73780
73781 - if (local->open_count == 0) {
73782 + if (local_read(&local->open_count) == 0) {
73783 if (local->ops->napi_poll)
73784 napi_disable(&local->napi);
73785 ieee80211_clear_tx_pending(local);
73786 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73787 index a7536fd..4039cc0 100644
73788 --- a/net/mac80211/main.c
73789 +++ b/net/mac80211/main.c
73790 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73791 local->hw.conf.power_level = power;
73792 }
73793
73794 - if (changed && local->open_count) {
73795 + if (changed && local_read(&local->open_count)) {
73796 ret = drv_config(local, changed);
73797 /*
73798 * Goal:
73799 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73800 index 9ee7164..56c5061 100644
73801 --- a/net/mac80211/pm.c
73802 +++ b/net/mac80211/pm.c
73803 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73804 struct ieee80211_sub_if_data *sdata;
73805 struct sta_info *sta;
73806
73807 - if (!local->open_count)
73808 + if (!local_read(&local->open_count))
73809 goto suspend;
73810
73811 ieee80211_scan_cancel(local);
73812 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73813 cancel_work_sync(&local->dynamic_ps_enable_work);
73814 del_timer_sync(&local->dynamic_ps_timer);
73815
73816 - local->wowlan = wowlan && local->open_count;
73817 + local->wowlan = wowlan && local_read(&local->open_count);
73818 if (local->wowlan) {
73819 int err = drv_suspend(local, wowlan);
73820 if (err < 0) {
73821 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73822 }
73823
73824 /* stop hardware - this must stop RX */
73825 - if (local->open_count)
73826 + if (local_read(&local->open_count))
73827 ieee80211_stop_device(local);
73828
73829 suspend:
73830 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73831 index 5a5a776..9600b11 100644
73832 --- a/net/mac80211/rate.c
73833 +++ b/net/mac80211/rate.c
73834 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
73835
73836 ASSERT_RTNL();
73837
73838 - if (local->open_count)
73839 + if (local_read(&local->open_count))
73840 return -EBUSY;
73841
73842 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73843 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
73844 index c97a065..ff61928 100644
73845 --- a/net/mac80211/rc80211_pid_debugfs.c
73846 +++ b/net/mac80211/rc80211_pid_debugfs.c
73847 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
73848
73849 spin_unlock_irqrestore(&events->lock, status);
73850
73851 - if (copy_to_user(buf, pb, p))
73852 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73853 return -EFAULT;
73854
73855 return p;
73856 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
73857 index d5230ec..c604b21 100644
73858 --- a/net/mac80211/util.c
73859 +++ b/net/mac80211/util.c
73860 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
73861 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73862
73863 /* everything else happens only if HW was up & running */
73864 - if (!local->open_count)
73865 + if (!local_read(&local->open_count))
73866 goto wake_up;
73867
73868 /*
73869 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
73870 index d5597b7..ab6d39c 100644
73871 --- a/net/netfilter/Kconfig
73872 +++ b/net/netfilter/Kconfig
73873 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
73874
73875 To compile it as a module, choose M here. If unsure, say N.
73876
73877 +config NETFILTER_XT_MATCH_GRADM
73878 + tristate '"gradm" match support'
73879 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73880 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73881 + ---help---
73882 + The gradm match allows to match on grsecurity RBAC being enabled.
73883 + It is useful when iptables rules are applied early on bootup to
73884 + prevent connections to the machine (except from a trusted host)
73885 + while the RBAC system is disabled.
73886 +
73887 config NETFILTER_XT_MATCH_HASHLIMIT
73888 tristate '"hashlimit" match support'
73889 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73890 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
73891 index 1a02853..5d8c22e 100644
73892 --- a/net/netfilter/Makefile
73893 +++ b/net/netfilter/Makefile
73894 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73895 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73896 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73897 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73898 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73899 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73900 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73901 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73902 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
73903 index 29fa5ba..8debc79 100644
73904 --- a/net/netfilter/ipvs/ip_vs_conn.c
73905 +++ b/net/netfilter/ipvs/ip_vs_conn.c
73906 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
73907 /* Increase the refcnt counter of the dest */
73908 atomic_inc(&dest->refcnt);
73909
73910 - conn_flags = atomic_read(&dest->conn_flags);
73911 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
73912 if (cp->protocol != IPPROTO_UDP)
73913 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73914 /* Bind with the destination and its corresponding transmitter */
73915 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
73916 atomic_set(&cp->refcnt, 1);
73917
73918 atomic_set(&cp->n_control, 0);
73919 - atomic_set(&cp->in_pkts, 0);
73920 + atomic_set_unchecked(&cp->in_pkts, 0);
73921
73922 atomic_inc(&ipvs->conn_count);
73923 if (flags & IP_VS_CONN_F_NO_CPORT)
73924 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
73925
73926 /* Don't drop the entry if its number of incoming packets is not
73927 located in [0, 8] */
73928 - i = atomic_read(&cp->in_pkts);
73929 + i = atomic_read_unchecked(&cp->in_pkts);
73930 if (i > 8 || i < 0) return 0;
73931
73932 if (!todrop_rate[i]) return 0;
73933 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
73934 index 093cc32..9209ae1 100644
73935 --- a/net/netfilter/ipvs/ip_vs_core.c
73936 +++ b/net/netfilter/ipvs/ip_vs_core.c
73937 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
73938 ret = cp->packet_xmit(skb, cp, pd->pp);
73939 /* do not touch skb anymore */
73940
73941 - atomic_inc(&cp->in_pkts);
73942 + atomic_inc_unchecked(&cp->in_pkts);
73943 ip_vs_conn_put(cp);
73944 return ret;
73945 }
73946 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
73947 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73948 pkts = sysctl_sync_threshold(ipvs);
73949 else
73950 - pkts = atomic_add_return(1, &cp->in_pkts);
73951 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73952
73953 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73954 cp->protocol == IPPROTO_SCTP) {
73955 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
73956 index e1a66cf..0910076 100644
73957 --- a/net/netfilter/ipvs/ip_vs_ctl.c
73958 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
73959 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
73960 ip_vs_rs_hash(ipvs, dest);
73961 write_unlock_bh(&ipvs->rs_lock);
73962 }
73963 - atomic_set(&dest->conn_flags, conn_flags);
73964 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
73965
73966 /* bind the service */
73967 if (!dest->svc) {
73968 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
73969 " %-7s %-6d %-10d %-10d\n",
73970 &dest->addr.in6,
73971 ntohs(dest->port),
73972 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73973 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73974 atomic_read(&dest->weight),
73975 atomic_read(&dest->activeconns),
73976 atomic_read(&dest->inactconns));
73977 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
73978 "%-7s %-6d %-10d %-10d\n",
73979 ntohl(dest->addr.ip),
73980 ntohs(dest->port),
73981 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73982 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73983 atomic_read(&dest->weight),
73984 atomic_read(&dest->activeconns),
73985 atomic_read(&dest->inactconns));
73986 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
73987
73988 entry.addr = dest->addr.ip;
73989 entry.port = dest->port;
73990 - entry.conn_flags = atomic_read(&dest->conn_flags);
73991 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73992 entry.weight = atomic_read(&dest->weight);
73993 entry.u_threshold = dest->u_threshold;
73994 entry.l_threshold = dest->l_threshold;
73995 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
73996 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73997
73998 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73999 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74000 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74001 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74002 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74003 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74004 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74005 index 2b6678c0..aaa41fc 100644
74006 --- a/net/netfilter/ipvs/ip_vs_sync.c
74007 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74008 @@ -649,7 +649,7 @@ control:
74009 * i.e only increment in_pkts for Templates.
74010 */
74011 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74012 - int pkts = atomic_add_return(1, &cp->in_pkts);
74013 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74014
74015 if (pkts % sysctl_sync_period(ipvs) != 1)
74016 return;
74017 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74018
74019 if (opt)
74020 memcpy(&cp->in_seq, opt, sizeof(*opt));
74021 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74022 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74023 cp->state = state;
74024 cp->old_state = cp->state;
74025 /*
74026 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74027 index aa2d720..d8aa111 100644
74028 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74029 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74030 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74031 else
74032 rc = NF_ACCEPT;
74033 /* do not touch skb anymore */
74034 - atomic_inc(&cp->in_pkts);
74035 + atomic_inc_unchecked(&cp->in_pkts);
74036 goto out;
74037 }
74038
74039 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74040 else
74041 rc = NF_ACCEPT;
74042 /* do not touch skb anymore */
74043 - atomic_inc(&cp->in_pkts);
74044 + atomic_inc_unchecked(&cp->in_pkts);
74045 goto out;
74046 }
74047
74048 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74049 index 66b2c54..c7884e3 100644
74050 --- a/net/netfilter/nfnetlink_log.c
74051 +++ b/net/netfilter/nfnetlink_log.c
74052 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74053 };
74054
74055 static DEFINE_SPINLOCK(instances_lock);
74056 -static atomic_t global_seq;
74057 +static atomic_unchecked_t global_seq;
74058
74059 #define INSTANCE_BUCKETS 16
74060 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74061 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74062 /* global sequence number */
74063 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74064 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74065 - htonl(atomic_inc_return(&global_seq)));
74066 + htonl(atomic_inc_return_unchecked(&global_seq)));
74067
74068 if (data_len) {
74069 struct nlattr *nla;
74070 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74071 new file mode 100644
74072 index 0000000..6905327
74073 --- /dev/null
74074 +++ b/net/netfilter/xt_gradm.c
74075 @@ -0,0 +1,51 @@
74076 +/*
74077 + * gradm match for netfilter
74078 + * Copyright © Zbigniew Krzystolik, 2010
74079 + *
74080 + * This program is free software; you can redistribute it and/or modify
74081 + * it under the terms of the GNU General Public License; either version
74082 + * 2 or 3 as published by the Free Software Foundation.
74083 + */
74084 +#include <linux/module.h>
74085 +#include <linux/moduleparam.h>
74086 +#include <linux/skbuff.h>
74087 +#include <linux/netfilter/x_tables.h>
74088 +#include <linux/grsecurity.h>
74089 +#include <linux/netfilter/xt_gradm.h>
74090 +
74091 +static bool
74092 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74093 +{
74094 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74095 + bool retval = false;
74096 + if (gr_acl_is_enabled())
74097 + retval = true;
74098 + return retval ^ info->invflags;
74099 +}
74100 +
74101 +static struct xt_match gradm_mt_reg __read_mostly = {
74102 + .name = "gradm",
74103 + .revision = 0,
74104 + .family = NFPROTO_UNSPEC,
74105 + .match = gradm_mt,
74106 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74107 + .me = THIS_MODULE,
74108 +};
74109 +
74110 +static int __init gradm_mt_init(void)
74111 +{
74112 + return xt_register_match(&gradm_mt_reg);
74113 +}
74114 +
74115 +static void __exit gradm_mt_exit(void)
74116 +{
74117 + xt_unregister_match(&gradm_mt_reg);
74118 +}
74119 +
74120 +module_init(gradm_mt_init);
74121 +module_exit(gradm_mt_exit);
74122 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74123 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74124 +MODULE_LICENSE("GPL");
74125 +MODULE_ALIAS("ipt_gradm");
74126 +MODULE_ALIAS("ip6t_gradm");
74127 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74128 index 4fe4fb4..87a89e5 100644
74129 --- a/net/netfilter/xt_statistic.c
74130 +++ b/net/netfilter/xt_statistic.c
74131 @@ -19,7 +19,7 @@
74132 #include <linux/module.h>
74133
74134 struct xt_statistic_priv {
74135 - atomic_t count;
74136 + atomic_unchecked_t count;
74137 } ____cacheline_aligned_in_smp;
74138
74139 MODULE_LICENSE("GPL");
74140 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74141 break;
74142 case XT_STATISTIC_MODE_NTH:
74143 do {
74144 - oval = atomic_read(&info->master->count);
74145 + oval = atomic_read_unchecked(&info->master->count);
74146 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74147 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74148 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74149 if (nval == 0)
74150 ret = !ret;
74151 break;
74152 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74153 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74154 if (info->master == NULL)
74155 return -ENOMEM;
74156 - atomic_set(&info->master->count, info->u.nth.count);
74157 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74158
74159 return 0;
74160 }
74161 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74162 index 1201b6d..bcff8c6 100644
74163 --- a/net/netlink/af_netlink.c
74164 +++ b/net/netlink/af_netlink.c
74165 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74166 sk->sk_error_report(sk);
74167 }
74168 }
74169 - atomic_inc(&sk->sk_drops);
74170 + atomic_inc_unchecked(&sk->sk_drops);
74171 }
74172
74173 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74174 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74175 sk_wmem_alloc_get(s),
74176 nlk->cb,
74177 atomic_read(&s->sk_refcnt),
74178 - atomic_read(&s->sk_drops),
74179 + atomic_read_unchecked(&s->sk_drops),
74180 sock_i_ino(s)
74181 );
74182
74183 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74184 index 732152f..60bb09e 100644
74185 --- a/net/netrom/af_netrom.c
74186 +++ b/net/netrom/af_netrom.c
74187 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74188 struct sock *sk = sock->sk;
74189 struct nr_sock *nr = nr_sk(sk);
74190
74191 + memset(sax, 0, sizeof(*sax));
74192 lock_sock(sk);
74193 if (peer != 0) {
74194 if (sk->sk_state != TCP_ESTABLISHED) {
74195 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74196 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74197 } else {
74198 sax->fsa_ax25.sax25_family = AF_NETROM;
74199 - sax->fsa_ax25.sax25_ndigis = 0;
74200 sax->fsa_ax25.sax25_call = nr->source_addr;
74201 *uaddr_len = sizeof(struct sockaddr_ax25);
74202 }
74203 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74204 index d9d4970..d5a6a68 100644
74205 --- a/net/packet/af_packet.c
74206 +++ b/net/packet/af_packet.c
74207 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74208
74209 spin_lock(&sk->sk_receive_queue.lock);
74210 po->stats.tp_packets++;
74211 - skb->dropcount = atomic_read(&sk->sk_drops);
74212 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74213 __skb_queue_tail(&sk->sk_receive_queue, skb);
74214 spin_unlock(&sk->sk_receive_queue.lock);
74215 sk->sk_data_ready(sk, skb->len);
74216 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74217 drop_n_acct:
74218 spin_lock(&sk->sk_receive_queue.lock);
74219 po->stats.tp_drops++;
74220 - atomic_inc(&sk->sk_drops);
74221 + atomic_inc_unchecked(&sk->sk_drops);
74222 spin_unlock(&sk->sk_receive_queue.lock);
74223
74224 drop_n_restore:
74225 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74226 case PACKET_HDRLEN:
74227 if (len > sizeof(int))
74228 len = sizeof(int);
74229 - if (copy_from_user(&val, optval, len))
74230 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74231 return -EFAULT;
74232 switch (val) {
74233 case TPACKET_V1:
74234 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74235
74236 if (put_user(len, optlen))
74237 return -EFAULT;
74238 - if (copy_to_user(optval, data, len))
74239 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74240 return -EFAULT;
74241 return 0;
74242 }
74243 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74244 index d65f699..05aa6ce 100644
74245 --- a/net/phonet/af_phonet.c
74246 +++ b/net/phonet/af_phonet.c
74247 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74248 {
74249 struct phonet_protocol *pp;
74250
74251 - if (protocol >= PHONET_NPROTO)
74252 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74253 return NULL;
74254
74255 rcu_read_lock();
74256 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74257 {
74258 int err = 0;
74259
74260 - if (protocol >= PHONET_NPROTO)
74261 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74262 return -EINVAL;
74263
74264 err = proto_register(pp->prot, 1);
74265 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74266 index 2ba6e9f..409573f 100644
74267 --- a/net/phonet/pep.c
74268 +++ b/net/phonet/pep.c
74269 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74270
74271 case PNS_PEP_CTRL_REQ:
74272 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74273 - atomic_inc(&sk->sk_drops);
74274 + atomic_inc_unchecked(&sk->sk_drops);
74275 break;
74276 }
74277 __skb_pull(skb, 4);
74278 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74279 }
74280
74281 if (pn->rx_credits == 0) {
74282 - atomic_inc(&sk->sk_drops);
74283 + atomic_inc_unchecked(&sk->sk_drops);
74284 err = -ENOBUFS;
74285 break;
74286 }
74287 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74288 }
74289
74290 if (pn->rx_credits == 0) {
74291 - atomic_inc(&sk->sk_drops);
74292 + atomic_inc_unchecked(&sk->sk_drops);
74293 err = NET_RX_DROP;
74294 break;
74295 }
74296 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74297 index 4c7eff3..59c727f 100644
74298 --- a/net/phonet/socket.c
74299 +++ b/net/phonet/socket.c
74300 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74301 pn->resource, sk->sk_state,
74302 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74303 sock_i_uid(sk), sock_i_ino(sk),
74304 - atomic_read(&sk->sk_refcnt), sk,
74305 - atomic_read(&sk->sk_drops), &len);
74306 + atomic_read(&sk->sk_refcnt),
74307 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74308 + NULL,
74309 +#else
74310 + sk,
74311 +#endif
74312 + atomic_read_unchecked(&sk->sk_drops), &len);
74313 }
74314 seq_printf(seq, "%*s\n", 127 - len, "");
74315 return 0;
74316 diff --git a/net/rds/cong.c b/net/rds/cong.c
74317 index e5b65ac..f3b6fb7 100644
74318 --- a/net/rds/cong.c
74319 +++ b/net/rds/cong.c
74320 @@ -78,7 +78,7 @@
74321 * finds that the saved generation number is smaller than the global generation
74322 * number, it wakes up the process.
74323 */
74324 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74325 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74326
74327 /*
74328 * Congestion monitoring
74329 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74330 rdsdebug("waking map %p for %pI4\n",
74331 map, &map->m_addr);
74332 rds_stats_inc(s_cong_update_received);
74333 - atomic_inc(&rds_cong_generation);
74334 + atomic_inc_unchecked(&rds_cong_generation);
74335 if (waitqueue_active(&map->m_waitq))
74336 wake_up(&map->m_waitq);
74337 if (waitqueue_active(&rds_poll_waitq))
74338 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74339
74340 int rds_cong_updated_since(unsigned long *recent)
74341 {
74342 - unsigned long gen = atomic_read(&rds_cong_generation);
74343 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74344
74345 if (likely(*recent == gen))
74346 return 0;
74347 diff --git a/net/rds/ib.h b/net/rds/ib.h
74348 index edfaaaf..8c89879 100644
74349 --- a/net/rds/ib.h
74350 +++ b/net/rds/ib.h
74351 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74352 /* sending acks */
74353 unsigned long i_ack_flags;
74354 #ifdef KERNEL_HAS_ATOMIC64
74355 - atomic64_t i_ack_next; /* next ACK to send */
74356 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74357 #else
74358 spinlock_t i_ack_lock; /* protect i_ack_next */
74359 u64 i_ack_next; /* next ACK to send */
74360 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74361 index 51c8689..36c555f 100644
74362 --- a/net/rds/ib_cm.c
74363 +++ b/net/rds/ib_cm.c
74364 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74365 /* Clear the ACK state */
74366 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74367 #ifdef KERNEL_HAS_ATOMIC64
74368 - atomic64_set(&ic->i_ack_next, 0);
74369 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74370 #else
74371 ic->i_ack_next = 0;
74372 #endif
74373 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74374 index e29e0ca..fa3a6a3 100644
74375 --- a/net/rds/ib_recv.c
74376 +++ b/net/rds/ib_recv.c
74377 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74378 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74379 int ack_required)
74380 {
74381 - atomic64_set(&ic->i_ack_next, seq);
74382 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74383 if (ack_required) {
74384 smp_mb__before_clear_bit();
74385 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74386 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74387 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74388 smp_mb__after_clear_bit();
74389
74390 - return atomic64_read(&ic->i_ack_next);
74391 + return atomic64_read_unchecked(&ic->i_ack_next);
74392 }
74393 #endif
74394
74395 diff --git a/net/rds/iw.h b/net/rds/iw.h
74396 index 04ce3b1..48119a6 100644
74397 --- a/net/rds/iw.h
74398 +++ b/net/rds/iw.h
74399 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74400 /* sending acks */
74401 unsigned long i_ack_flags;
74402 #ifdef KERNEL_HAS_ATOMIC64
74403 - atomic64_t i_ack_next; /* next ACK to send */
74404 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74405 #else
74406 spinlock_t i_ack_lock; /* protect i_ack_next */
74407 u64 i_ack_next; /* next ACK to send */
74408 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74409 index 9556d28..f046d0e 100644
74410 --- a/net/rds/iw_cm.c
74411 +++ b/net/rds/iw_cm.c
74412 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74413 /* Clear the ACK state */
74414 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74415 #ifdef KERNEL_HAS_ATOMIC64
74416 - atomic64_set(&ic->i_ack_next, 0);
74417 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74418 #else
74419 ic->i_ack_next = 0;
74420 #endif
74421 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74422 index 5e57347..3916042 100644
74423 --- a/net/rds/iw_recv.c
74424 +++ b/net/rds/iw_recv.c
74425 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74426 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74427 int ack_required)
74428 {
74429 - atomic64_set(&ic->i_ack_next, seq);
74430 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74431 if (ack_required) {
74432 smp_mb__before_clear_bit();
74433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74434 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74435 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74436 smp_mb__after_clear_bit();
74437
74438 - return atomic64_read(&ic->i_ack_next);
74439 + return atomic64_read_unchecked(&ic->i_ack_next);
74440 }
74441 #endif
74442
74443 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74444 index edac9ef..16bcb98 100644
74445 --- a/net/rds/tcp.c
74446 +++ b/net/rds/tcp.c
74447 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74448 int val = 1;
74449
74450 set_fs(KERNEL_DS);
74451 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74452 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74453 sizeof(val));
74454 set_fs(oldfs);
74455 }
74456 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74457 index 1b4fd68..2234175 100644
74458 --- a/net/rds/tcp_send.c
74459 +++ b/net/rds/tcp_send.c
74460 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74461
74462 oldfs = get_fs();
74463 set_fs(KERNEL_DS);
74464 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74465 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74466 sizeof(val));
74467 set_fs(oldfs);
74468 }
74469 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74470 index 74c064c..fdec26f 100644
74471 --- a/net/rxrpc/af_rxrpc.c
74472 +++ b/net/rxrpc/af_rxrpc.c
74473 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74474 __be32 rxrpc_epoch;
74475
74476 /* current debugging ID */
74477 -atomic_t rxrpc_debug_id;
74478 +atomic_unchecked_t rxrpc_debug_id;
74479
74480 /* count of skbs currently in use */
74481 atomic_t rxrpc_n_skbs;
74482 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74483 index f99cfce..cc529dd 100644
74484 --- a/net/rxrpc/ar-ack.c
74485 +++ b/net/rxrpc/ar-ack.c
74486 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74487
74488 _enter("{%d,%d,%d,%d},",
74489 call->acks_hard, call->acks_unacked,
74490 - atomic_read(&call->sequence),
74491 + atomic_read_unchecked(&call->sequence),
74492 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74493
74494 stop = 0;
74495 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74496
74497 /* each Tx packet has a new serial number */
74498 sp->hdr.serial =
74499 - htonl(atomic_inc_return(&call->conn->serial));
74500 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74501
74502 hdr = (struct rxrpc_header *) txb->head;
74503 hdr->serial = sp->hdr.serial;
74504 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74505 */
74506 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74507 {
74508 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74509 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74510 }
74511
74512 /*
74513 @@ -629,7 +629,7 @@ process_further:
74514
74515 latest = ntohl(sp->hdr.serial);
74516 hard = ntohl(ack.firstPacket);
74517 - tx = atomic_read(&call->sequence);
74518 + tx = atomic_read_unchecked(&call->sequence);
74519
74520 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74521 latest,
74522 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74523 goto maybe_reschedule;
74524
74525 send_ACK_with_skew:
74526 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74527 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74528 ntohl(ack.serial));
74529 send_ACK:
74530 mtu = call->conn->trans->peer->if_mtu;
74531 @@ -1173,7 +1173,7 @@ send_ACK:
74532 ackinfo.rxMTU = htonl(5692);
74533 ackinfo.jumbo_max = htonl(4);
74534
74535 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74536 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74537 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74538 ntohl(hdr.serial),
74539 ntohs(ack.maxSkew),
74540 @@ -1191,7 +1191,7 @@ send_ACK:
74541 send_message:
74542 _debug("send message");
74543
74544 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74545 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74546 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74547 send_message_2:
74548
74549 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
74550 index bf656c2..48f9d27 100644
74551 --- a/net/rxrpc/ar-call.c
74552 +++ b/net/rxrpc/ar-call.c
74553 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
74554 spin_lock_init(&call->lock);
74555 rwlock_init(&call->state_lock);
74556 atomic_set(&call->usage, 1);
74557 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74558 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74559 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74560
74561 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74562 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
74563 index 4106ca9..a338d7a 100644
74564 --- a/net/rxrpc/ar-connection.c
74565 +++ b/net/rxrpc/ar-connection.c
74566 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
74567 rwlock_init(&conn->lock);
74568 spin_lock_init(&conn->state_lock);
74569 atomic_set(&conn->usage, 1);
74570 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74571 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74572 conn->avail_calls = RXRPC_MAXCALLS;
74573 conn->size_align = 4;
74574 conn->header_size = sizeof(struct rxrpc_header);
74575 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
74576 index e7ed43a..6afa140 100644
74577 --- a/net/rxrpc/ar-connevent.c
74578 +++ b/net/rxrpc/ar-connevent.c
74579 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
74580
74581 len = iov[0].iov_len + iov[1].iov_len;
74582
74583 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74584 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74585 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
74586
74587 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74588 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
74589 index 1a2b0633..e8d1382 100644
74590 --- a/net/rxrpc/ar-input.c
74591 +++ b/net/rxrpc/ar-input.c
74592 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
74593 /* track the latest serial number on this connection for ACK packet
74594 * information */
74595 serial = ntohl(sp->hdr.serial);
74596 - hi_serial = atomic_read(&call->conn->hi_serial);
74597 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
74598 while (serial > hi_serial)
74599 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
74600 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
74601 serial);
74602
74603 /* request ACK generation for any ACK or DATA packet that requests
74604 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
74605 index 8e22bd3..f66d1c0 100644
74606 --- a/net/rxrpc/ar-internal.h
74607 +++ b/net/rxrpc/ar-internal.h
74608 @@ -272,8 +272,8 @@ struct rxrpc_connection {
74609 int error; /* error code for local abort */
74610 int debug_id; /* debug ID for printks */
74611 unsigned call_counter; /* call ID counter */
74612 - atomic_t serial; /* packet serial number counter */
74613 - atomic_t hi_serial; /* highest serial number received */
74614 + atomic_unchecked_t serial; /* packet serial number counter */
74615 + atomic_unchecked_t hi_serial; /* highest serial number received */
74616 u8 avail_calls; /* number of calls available */
74617 u8 size_align; /* data size alignment (for security) */
74618 u8 header_size; /* rxrpc + security header size */
74619 @@ -346,7 +346,7 @@ struct rxrpc_call {
74620 spinlock_t lock;
74621 rwlock_t state_lock; /* lock for state transition */
74622 atomic_t usage;
74623 - atomic_t sequence; /* Tx data packet sequence counter */
74624 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
74625 u32 abort_code; /* local/remote abort code */
74626 enum { /* current state of call */
74627 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
74628 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
74629 */
74630 extern atomic_t rxrpc_n_skbs;
74631 extern __be32 rxrpc_epoch;
74632 -extern atomic_t rxrpc_debug_id;
74633 +extern atomic_unchecked_t rxrpc_debug_id;
74634 extern struct workqueue_struct *rxrpc_workqueue;
74635
74636 /*
74637 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
74638 index 87f7135..74d3703 100644
74639 --- a/net/rxrpc/ar-local.c
74640 +++ b/net/rxrpc/ar-local.c
74641 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
74642 spin_lock_init(&local->lock);
74643 rwlock_init(&local->services_lock);
74644 atomic_set(&local->usage, 1);
74645 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74646 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74647 memcpy(&local->srx, srx, sizeof(*srx));
74648 }
74649
74650 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
74651 index 338d793..47391d0 100644
74652 --- a/net/rxrpc/ar-output.c
74653 +++ b/net/rxrpc/ar-output.c
74654 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
74655 sp->hdr.cid = call->cid;
74656 sp->hdr.callNumber = call->call_id;
74657 sp->hdr.seq =
74658 - htonl(atomic_inc_return(&call->sequence));
74659 + htonl(atomic_inc_return_unchecked(&call->sequence));
74660 sp->hdr.serial =
74661 - htonl(atomic_inc_return(&conn->serial));
74662 + htonl(atomic_inc_return_unchecked(&conn->serial));
74663 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74664 sp->hdr.userStatus = 0;
74665 sp->hdr.securityIndex = conn->security_ix;
74666 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
74667 index 2754f09..b20e38f 100644
74668 --- a/net/rxrpc/ar-peer.c
74669 +++ b/net/rxrpc/ar-peer.c
74670 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
74671 INIT_LIST_HEAD(&peer->error_targets);
74672 spin_lock_init(&peer->lock);
74673 atomic_set(&peer->usage, 1);
74674 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74675 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74676 memcpy(&peer->srx, srx, sizeof(*srx));
74677
74678 rxrpc_assess_MTU_size(peer);
74679 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
74680 index 38047f7..9f48511 100644
74681 --- a/net/rxrpc/ar-proc.c
74682 +++ b/net/rxrpc/ar-proc.c
74683 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
74684 atomic_read(&conn->usage),
74685 rxrpc_conn_states[conn->state],
74686 key_serial(conn->key),
74687 - atomic_read(&conn->serial),
74688 - atomic_read(&conn->hi_serial));
74689 + atomic_read_unchecked(&conn->serial),
74690 + atomic_read_unchecked(&conn->hi_serial));
74691
74692 return 0;
74693 }
74694 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
74695 index 92df566..87ec1bf 100644
74696 --- a/net/rxrpc/ar-transport.c
74697 +++ b/net/rxrpc/ar-transport.c
74698 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
74699 spin_lock_init(&trans->client_lock);
74700 rwlock_init(&trans->conn_lock);
74701 atomic_set(&trans->usage, 1);
74702 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74703 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74704
74705 if (peer->srx.transport.family == AF_INET) {
74706 switch (peer->srx.transport_type) {
74707 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
74708 index 7635107..4670276 100644
74709 --- a/net/rxrpc/rxkad.c
74710 +++ b/net/rxrpc/rxkad.c
74711 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
74712
74713 len = iov[0].iov_len + iov[1].iov_len;
74714
74715 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74716 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74717 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74718
74719 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74720 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
74721
74722 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74723
74724 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
74725 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74726 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74727
74728 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
74729 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
74730 index 1e2eee8..ce3967e 100644
74731 --- a/net/sctp/proc.c
74732 +++ b/net/sctp/proc.c
74733 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
74734 seq_printf(seq,
74735 "%8pK %8pK %-3d %-3d %-2d %-4d "
74736 "%4d %8d %8d %7d %5lu %-5d %5d ",
74737 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74738 + assoc, sk,
74739 + sctp_sk(sk)->type, sk->sk_state,
74740 assoc->state, hash,
74741 assoc->assoc_id,
74742 assoc->sndbuf_used,
74743 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
74744 index 54a7cd2..944edae 100644
74745 --- a/net/sctp/socket.c
74746 +++ b/net/sctp/socket.c
74747 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
74748 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
74749 if (space_left < addrlen)
74750 return -ENOMEM;
74751 - if (copy_to_user(to, &temp, addrlen))
74752 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
74753 return -EFAULT;
74754 to += addrlen;
74755 cnt++;
74756 diff --git a/net/socket.c b/net/socket.c
74757 index 2dce67a..1e91168 100644
74758 --- a/net/socket.c
74759 +++ b/net/socket.c
74760 @@ -88,6 +88,7 @@
74761 #include <linux/nsproxy.h>
74762 #include <linux/magic.h>
74763 #include <linux/slab.h>
74764 +#include <linux/in.h>
74765
74766 #include <asm/uaccess.h>
74767 #include <asm/unistd.h>
74768 @@ -105,6 +106,8 @@
74769 #include <linux/sockios.h>
74770 #include <linux/atalk.h>
74771
74772 +#include <linux/grsock.h>
74773 +
74774 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74775 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74776 unsigned long nr_segs, loff_t pos);
74777 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
74778 &sockfs_dentry_operations, SOCKFS_MAGIC);
74779 }
74780
74781 -static struct vfsmount *sock_mnt __read_mostly;
74782 +struct vfsmount *sock_mnt __read_mostly;
74783
74784 static struct file_system_type sock_fs_type = {
74785 .name = "sockfs",
74786 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
74787 return -EAFNOSUPPORT;
74788 if (type < 0 || type >= SOCK_MAX)
74789 return -EINVAL;
74790 + if (protocol < 0)
74791 + return -EINVAL;
74792
74793 /* Compatibility.
74794
74795 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
74796 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74797 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74798
74799 + if(!gr_search_socket(family, type, protocol)) {
74800 + retval = -EACCES;
74801 + goto out;
74802 + }
74803 +
74804 + if (gr_handle_sock_all(family, type, protocol)) {
74805 + retval = -EACCES;
74806 + goto out;
74807 + }
74808 +
74809 retval = sock_create(family, type, protocol, &sock);
74810 if (retval < 0)
74811 goto out;
74812 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
74813 if (sock) {
74814 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74815 if (err >= 0) {
74816 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
74817 + err = -EACCES;
74818 + goto error;
74819 + }
74820 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74821 + if (err)
74822 + goto error;
74823 +
74824 err = security_socket_bind(sock,
74825 (struct sockaddr *)&address,
74826 addrlen);
74827 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
74828 (struct sockaddr *)
74829 &address, addrlen);
74830 }
74831 +error:
74832 fput_light(sock->file, fput_needed);
74833 }
74834 return err;
74835 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
74836 if ((unsigned)backlog > somaxconn)
74837 backlog = somaxconn;
74838
74839 + if (gr_handle_sock_server_other(sock->sk)) {
74840 + err = -EPERM;
74841 + goto error;
74842 + }
74843 +
74844 + err = gr_search_listen(sock);
74845 + if (err)
74846 + goto error;
74847 +
74848 err = security_socket_listen(sock, backlog);
74849 if (!err)
74850 err = sock->ops->listen(sock, backlog);
74851
74852 +error:
74853 fput_light(sock->file, fput_needed);
74854 }
74855 return err;
74856 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
74857 newsock->type = sock->type;
74858 newsock->ops = sock->ops;
74859
74860 + if (gr_handle_sock_server_other(sock->sk)) {
74861 + err = -EPERM;
74862 + sock_release(newsock);
74863 + goto out_put;
74864 + }
74865 +
74866 + err = gr_search_accept(sock);
74867 + if (err) {
74868 + sock_release(newsock);
74869 + goto out_put;
74870 + }
74871 +
74872 /*
74873 * We don't need try_module_get here, as the listening socket (sock)
74874 * has the protocol module (sock->ops->owner) held.
74875 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
74876 fd_install(newfd, newfile);
74877 err = newfd;
74878
74879 + gr_attach_curr_ip(newsock->sk);
74880 +
74881 out_put:
74882 fput_light(sock->file, fput_needed);
74883 out:
74884 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
74885 int, addrlen)
74886 {
74887 struct socket *sock;
74888 + struct sockaddr *sck;
74889 struct sockaddr_storage address;
74890 int err, fput_needed;
74891
74892 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
74893 if (err < 0)
74894 goto out_put;
74895
74896 + sck = (struct sockaddr *)&address;
74897 +
74898 + if (gr_handle_sock_client(sck)) {
74899 + err = -EACCES;
74900 + goto out_put;
74901 + }
74902 +
74903 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
74904 + if (err)
74905 + goto out_put;
74906 +
74907 err =
74908 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
74909 if (err)
74910 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
74911 * checking falls down on this.
74912 */
74913 if (copy_from_user(ctl_buf,
74914 - (void __user __force *)msg_sys->msg_control,
74915 + (void __force_user *)msg_sys->msg_control,
74916 ctl_len))
74917 goto out_freectl;
74918 msg_sys->msg_control = ctl_buf;
74919 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
74920 * kernel msghdr to use the kernel address space)
74921 */
74922
74923 - uaddr = (__force void __user *)msg_sys->msg_name;
74924 + uaddr = (void __force_user *)msg_sys->msg_name;
74925 uaddr_len = COMPAT_NAMELEN(msg);
74926 if (MSG_CMSG_COMPAT & flags) {
74927 err = verify_compat_iovec(msg_sys, iov,
74928 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
74929 }
74930
74931 ifr = compat_alloc_user_space(buf_size);
74932 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
74933 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
74934
74935 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
74936 return -EFAULT;
74937 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
74938 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
74939
74940 if (copy_in_user(rxnfc, compat_rxnfc,
74941 - (void *)(&rxnfc->fs.m_ext + 1) -
74942 - (void *)rxnfc) ||
74943 + (void __user *)(&rxnfc->fs.m_ext + 1) -
74944 + (void __user *)rxnfc) ||
74945 copy_in_user(&rxnfc->fs.ring_cookie,
74946 &compat_rxnfc->fs.ring_cookie,
74947 - (void *)(&rxnfc->fs.location + 1) -
74948 - (void *)&rxnfc->fs.ring_cookie) ||
74949 + (void __user *)(&rxnfc->fs.location + 1) -
74950 + (void __user *)&rxnfc->fs.ring_cookie) ||
74951 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
74952 sizeof(rxnfc->rule_cnt)))
74953 return -EFAULT;
74954 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
74955
74956 if (convert_out) {
74957 if (copy_in_user(compat_rxnfc, rxnfc,
74958 - (const void *)(&rxnfc->fs.m_ext + 1) -
74959 - (const void *)rxnfc) ||
74960 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
74961 + (const void __user *)rxnfc) ||
74962 copy_in_user(&compat_rxnfc->fs.ring_cookie,
74963 &rxnfc->fs.ring_cookie,
74964 - (const void *)(&rxnfc->fs.location + 1) -
74965 - (const void *)&rxnfc->fs.ring_cookie) ||
74966 + (const void __user *)(&rxnfc->fs.location + 1) -
74967 + (const void __user *)&rxnfc->fs.ring_cookie) ||
74968 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
74969 sizeof(rxnfc->rule_cnt)))
74970 return -EFAULT;
74971 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
74972 old_fs = get_fs();
74973 set_fs(KERNEL_DS);
74974 err = dev_ioctl(net, cmd,
74975 - (struct ifreq __user __force *) &kifr);
74976 + (struct ifreq __force_user *) &kifr);
74977 set_fs(old_fs);
74978
74979 return err;
74980 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
74981
74982 old_fs = get_fs();
74983 set_fs(KERNEL_DS);
74984 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
74985 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
74986 set_fs(old_fs);
74987
74988 if (cmd == SIOCGIFMAP && !err) {
74989 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
74990 ret |= __get_user(rtdev, &(ur4->rt_dev));
74991 if (rtdev) {
74992 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
74993 - r4.rt_dev = (char __user __force *)devname;
74994 + r4.rt_dev = (char __force_user *)devname;
74995 devname[15] = 0;
74996 } else
74997 r4.rt_dev = NULL;
74998 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
74999 int __user *uoptlen;
75000 int err;
75001
75002 - uoptval = (char __user __force *) optval;
75003 - uoptlen = (int __user __force *) optlen;
75004 + uoptval = (char __force_user *) optval;
75005 + uoptlen = (int __force_user *) optlen;
75006
75007 set_fs(KERNEL_DS);
75008 if (level == SOL_SOCKET)
75009 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75010 char __user *uoptval;
75011 int err;
75012
75013 - uoptval = (char __user __force *) optval;
75014 + uoptval = (char __force_user *) optval;
75015
75016 set_fs(KERNEL_DS);
75017 if (level == SOL_SOCKET)
75018 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75019 index 00a1a2a..6a0138a 100644
75020 --- a/net/sunrpc/sched.c
75021 +++ b/net/sunrpc/sched.c
75022 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75023 #ifdef RPC_DEBUG
75024 static void rpc_task_set_debuginfo(struct rpc_task *task)
75025 {
75026 - static atomic_t rpc_pid;
75027 + static atomic_unchecked_t rpc_pid;
75028
75029 - task->tk_pid = atomic_inc_return(&rpc_pid);
75030 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75031 }
75032 #else
75033 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75034 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75035 index 71bed1c..5dff36d 100644
75036 --- a/net/sunrpc/svcsock.c
75037 +++ b/net/sunrpc/svcsock.c
75038 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75039 int buflen, unsigned int base)
75040 {
75041 size_t save_iovlen;
75042 - void __user *save_iovbase;
75043 + void *save_iovbase;
75044 unsigned int i;
75045 int ret;
75046
75047 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75048 index 09af4fa..77110a9 100644
75049 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75050 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75051 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75052 static unsigned int min_max_inline = 4096;
75053 static unsigned int max_max_inline = 65536;
75054
75055 -atomic_t rdma_stat_recv;
75056 -atomic_t rdma_stat_read;
75057 -atomic_t rdma_stat_write;
75058 -atomic_t rdma_stat_sq_starve;
75059 -atomic_t rdma_stat_rq_starve;
75060 -atomic_t rdma_stat_rq_poll;
75061 -atomic_t rdma_stat_rq_prod;
75062 -atomic_t rdma_stat_sq_poll;
75063 -atomic_t rdma_stat_sq_prod;
75064 +atomic_unchecked_t rdma_stat_recv;
75065 +atomic_unchecked_t rdma_stat_read;
75066 +atomic_unchecked_t rdma_stat_write;
75067 +atomic_unchecked_t rdma_stat_sq_starve;
75068 +atomic_unchecked_t rdma_stat_rq_starve;
75069 +atomic_unchecked_t rdma_stat_rq_poll;
75070 +atomic_unchecked_t rdma_stat_rq_prod;
75071 +atomic_unchecked_t rdma_stat_sq_poll;
75072 +atomic_unchecked_t rdma_stat_sq_prod;
75073
75074 /* Temporary NFS request map and context caches */
75075 struct kmem_cache *svc_rdma_map_cachep;
75076 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75077 len -= *ppos;
75078 if (len > *lenp)
75079 len = *lenp;
75080 - if (len && copy_to_user(buffer, str_buf, len))
75081 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75082 return -EFAULT;
75083 *lenp = len;
75084 *ppos += len;
75085 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75086 {
75087 .procname = "rdma_stat_read",
75088 .data = &rdma_stat_read,
75089 - .maxlen = sizeof(atomic_t),
75090 + .maxlen = sizeof(atomic_unchecked_t),
75091 .mode = 0644,
75092 .proc_handler = read_reset_stat,
75093 },
75094 {
75095 .procname = "rdma_stat_recv",
75096 .data = &rdma_stat_recv,
75097 - .maxlen = sizeof(atomic_t),
75098 + .maxlen = sizeof(atomic_unchecked_t),
75099 .mode = 0644,
75100 .proc_handler = read_reset_stat,
75101 },
75102 {
75103 .procname = "rdma_stat_write",
75104 .data = &rdma_stat_write,
75105 - .maxlen = sizeof(atomic_t),
75106 + .maxlen = sizeof(atomic_unchecked_t),
75107 .mode = 0644,
75108 .proc_handler = read_reset_stat,
75109 },
75110 {
75111 .procname = "rdma_stat_sq_starve",
75112 .data = &rdma_stat_sq_starve,
75113 - .maxlen = sizeof(atomic_t),
75114 + .maxlen = sizeof(atomic_unchecked_t),
75115 .mode = 0644,
75116 .proc_handler = read_reset_stat,
75117 },
75118 {
75119 .procname = "rdma_stat_rq_starve",
75120 .data = &rdma_stat_rq_starve,
75121 - .maxlen = sizeof(atomic_t),
75122 + .maxlen = sizeof(atomic_unchecked_t),
75123 .mode = 0644,
75124 .proc_handler = read_reset_stat,
75125 },
75126 {
75127 .procname = "rdma_stat_rq_poll",
75128 .data = &rdma_stat_rq_poll,
75129 - .maxlen = sizeof(atomic_t),
75130 + .maxlen = sizeof(atomic_unchecked_t),
75131 .mode = 0644,
75132 .proc_handler = read_reset_stat,
75133 },
75134 {
75135 .procname = "rdma_stat_rq_prod",
75136 .data = &rdma_stat_rq_prod,
75137 - .maxlen = sizeof(atomic_t),
75138 + .maxlen = sizeof(atomic_unchecked_t),
75139 .mode = 0644,
75140 .proc_handler = read_reset_stat,
75141 },
75142 {
75143 .procname = "rdma_stat_sq_poll",
75144 .data = &rdma_stat_sq_poll,
75145 - .maxlen = sizeof(atomic_t),
75146 + .maxlen = sizeof(atomic_unchecked_t),
75147 .mode = 0644,
75148 .proc_handler = read_reset_stat,
75149 },
75150 {
75151 .procname = "rdma_stat_sq_prod",
75152 .data = &rdma_stat_sq_prod,
75153 - .maxlen = sizeof(atomic_t),
75154 + .maxlen = sizeof(atomic_unchecked_t),
75155 .mode = 0644,
75156 .proc_handler = read_reset_stat,
75157 },
75158 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75159 index df67211..c354b13 100644
75160 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75161 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75162 @@ -499,7 +499,7 @@ next_sge:
75163 svc_rdma_put_context(ctxt, 0);
75164 goto out;
75165 }
75166 - atomic_inc(&rdma_stat_read);
75167 + atomic_inc_unchecked(&rdma_stat_read);
75168
75169 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75170 chl_map->ch[ch_no].count -= read_wr.num_sge;
75171 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75172 dto_q);
75173 list_del_init(&ctxt->dto_q);
75174 } else {
75175 - atomic_inc(&rdma_stat_rq_starve);
75176 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75177 clear_bit(XPT_DATA, &xprt->xpt_flags);
75178 ctxt = NULL;
75179 }
75180 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75181 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75182 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75183 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75184 - atomic_inc(&rdma_stat_recv);
75185 + atomic_inc_unchecked(&rdma_stat_recv);
75186
75187 /* Build up the XDR from the receive buffers. */
75188 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75189 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75190 index 249a835..fb2794b 100644
75191 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75192 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75193 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75194 write_wr.wr.rdma.remote_addr = to;
75195
75196 /* Post It */
75197 - atomic_inc(&rdma_stat_write);
75198 + atomic_inc_unchecked(&rdma_stat_write);
75199 if (svc_rdma_send(xprt, &write_wr))
75200 goto err;
75201 return 0;
75202 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75203 index ba1296d..0fec1a5 100644
75204 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75205 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75206 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75207 return;
75208
75209 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75210 - atomic_inc(&rdma_stat_rq_poll);
75211 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75212
75213 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75214 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75215 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75216 }
75217
75218 if (ctxt)
75219 - atomic_inc(&rdma_stat_rq_prod);
75220 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75221
75222 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75223 /*
75224 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75225 return;
75226
75227 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75228 - atomic_inc(&rdma_stat_sq_poll);
75229 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75230 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75231 if (wc.status != IB_WC_SUCCESS)
75232 /* Close the transport */
75233 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75234 }
75235
75236 if (ctxt)
75237 - atomic_inc(&rdma_stat_sq_prod);
75238 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75239 }
75240
75241 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75242 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75243 spin_lock_bh(&xprt->sc_lock);
75244 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75245 spin_unlock_bh(&xprt->sc_lock);
75246 - atomic_inc(&rdma_stat_sq_starve);
75247 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75248
75249 /* See if we can opportunistically reap SQ WR to make room */
75250 sq_cq_reap(xprt);
75251 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75252 index e758139..d29ea47 100644
75253 --- a/net/sysctl_net.c
75254 +++ b/net/sysctl_net.c
75255 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75256 struct ctl_table *table)
75257 {
75258 /* Allow network administrator to have same access as root. */
75259 - if (capable(CAP_NET_ADMIN)) {
75260 + if (capable_nolog(CAP_NET_ADMIN)) {
75261 int mode = (table->mode >> 6) & 7;
75262 return (mode << 6) | (mode << 3) | mode;
75263 }
75264 diff --git a/net/tipc/link.c b/net/tipc/link.c
75265 index ae98a72..7bb6056 100644
75266 --- a/net/tipc/link.c
75267 +++ b/net/tipc/link.c
75268 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75269 struct tipc_msg fragm_hdr;
75270 struct sk_buff *buf, *buf_chain, *prev;
75271 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75272 - const unchar *sect_crs;
75273 + const unchar __user *sect_crs;
75274 int curr_sect;
75275 u32 fragm_no;
75276
75277 @@ -1247,7 +1247,7 @@ again:
75278
75279 if (!sect_rest) {
75280 sect_rest = msg_sect[++curr_sect].iov_len;
75281 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75282 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75283 }
75284
75285 if (sect_rest < fragm_rest)
75286 @@ -1266,7 +1266,7 @@ error:
75287 }
75288 } else
75289 skb_copy_to_linear_data_offset(buf, fragm_crs,
75290 - sect_crs, sz);
75291 + (const void __force_kernel *)sect_crs, sz);
75292 sect_crs += sz;
75293 sect_rest -= sz;
75294 fragm_crs += sz;
75295 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75296 index 83d5096..dcba497 100644
75297 --- a/net/tipc/msg.c
75298 +++ b/net/tipc/msg.c
75299 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75300 msg_sect[cnt].iov_len);
75301 else
75302 skb_copy_to_linear_data_offset(*buf, pos,
75303 - msg_sect[cnt].iov_base,
75304 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75305 msg_sect[cnt].iov_len);
75306 pos += msg_sect[cnt].iov_len;
75307 }
75308 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75309 index 1983717..4d6102c 100644
75310 --- a/net/tipc/subscr.c
75311 +++ b/net/tipc/subscr.c
75312 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75313 {
75314 struct iovec msg_sect;
75315
75316 - msg_sect.iov_base = (void *)&sub->evt;
75317 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75318 msg_sect.iov_len = sizeof(struct tipc_event);
75319
75320 sub->evt.event = htohl(event, sub->swap);
75321 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75322 index d99678a..3514a21 100644
75323 --- a/net/unix/af_unix.c
75324 +++ b/net/unix/af_unix.c
75325 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75326 err = -ECONNREFUSED;
75327 if (!S_ISSOCK(inode->i_mode))
75328 goto put_fail;
75329 +
75330 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75331 + err = -EACCES;
75332 + goto put_fail;
75333 + }
75334 +
75335 u = unix_find_socket_byinode(inode);
75336 if (!u)
75337 goto put_fail;
75338 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75339 if (u) {
75340 struct dentry *dentry;
75341 dentry = unix_sk(u)->dentry;
75342 +
75343 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75344 + err = -EPERM;
75345 + sock_put(u);
75346 + goto fail;
75347 + }
75348 +
75349 if (dentry)
75350 touch_atime(unix_sk(u)->mnt, dentry);
75351 } else
75352 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75353 err = security_path_mknod(&path, dentry, mode, 0);
75354 if (err)
75355 goto out_mknod_drop_write;
75356 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75357 + err = -EACCES;
75358 + goto out_mknod_drop_write;
75359 + }
75360 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75361 out_mknod_drop_write:
75362 mnt_drop_write(path.mnt);
75363 if (err)
75364 goto out_mknod_dput;
75365 +
75366 + gr_handle_create(dentry, path.mnt);
75367 +
75368 mutex_unlock(&path.dentry->d_inode->i_mutex);
75369 dput(path.dentry);
75370 path.dentry = dentry;
75371 diff --git a/net/wireless/core.h b/net/wireless/core.h
75372 index b9ec306..b4a563e 100644
75373 --- a/net/wireless/core.h
75374 +++ b/net/wireless/core.h
75375 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75376 struct mutex mtx;
75377
75378 /* rfkill support */
75379 - struct rfkill_ops rfkill_ops;
75380 + rfkill_ops_no_const rfkill_ops;
75381 struct rfkill *rfkill;
75382 struct work_struct rfkill_sync;
75383
75384 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75385 index 0af7f54..c916d2f 100644
75386 --- a/net/wireless/wext-core.c
75387 +++ b/net/wireless/wext-core.c
75388 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75389 */
75390
75391 /* Support for very large requests */
75392 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75393 - (user_length > descr->max_tokens)) {
75394 + if (user_length > descr->max_tokens) {
75395 /* Allow userspace to GET more than max so
75396 * we can support any size GET requests.
75397 * There is still a limit : -ENOMEM.
75398 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75399 }
75400 }
75401
75402 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75403 - /*
75404 - * If this is a GET, but not NOMAX, it means that the extra
75405 - * data is not bounded by userspace, but by max_tokens. Thus
75406 - * set the length to max_tokens. This matches the extra data
75407 - * allocation.
75408 - * The driver should fill it with the number of tokens it
75409 - * provided, and it may check iwp->length rather than having
75410 - * knowledge of max_tokens. If the driver doesn't change the
75411 - * iwp->length, this ioctl just copies back max_token tokens
75412 - * filled with zeroes. Hopefully the driver isn't claiming
75413 - * them to be valid data.
75414 - */
75415 - iwp->length = descr->max_tokens;
75416 - }
75417 -
75418 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75419
75420 iwp->length += essid_compat;
75421 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75422 index 9049a5c..cfa6f5c 100644
75423 --- a/net/xfrm/xfrm_policy.c
75424 +++ b/net/xfrm/xfrm_policy.c
75425 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75426 {
75427 policy->walk.dead = 1;
75428
75429 - atomic_inc(&policy->genid);
75430 + atomic_inc_unchecked(&policy->genid);
75431
75432 if (del_timer(&policy->timer))
75433 xfrm_pol_put(policy);
75434 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75435 hlist_add_head(&policy->bydst, chain);
75436 xfrm_pol_hold(policy);
75437 net->xfrm.policy_count[dir]++;
75438 - atomic_inc(&flow_cache_genid);
75439 + atomic_inc_unchecked(&flow_cache_genid);
75440 if (delpol)
75441 __xfrm_policy_unlink(delpol, dir);
75442 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75443 @@ -1530,7 +1530,7 @@ free_dst:
75444 goto out;
75445 }
75446
75447 -static int inline
75448 +static inline int
75449 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75450 {
75451 if (!*target) {
75452 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75453 return 0;
75454 }
75455
75456 -static int inline
75457 +static inline int
75458 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75459 {
75460 #ifdef CONFIG_XFRM_SUB_POLICY
75461 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75462 #endif
75463 }
75464
75465 -static int inline
75466 +static inline int
75467 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75468 {
75469 #ifdef CONFIG_XFRM_SUB_POLICY
75470 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75471
75472 xdst->num_pols = num_pols;
75473 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75474 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75475 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75476
75477 return xdst;
75478 }
75479 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75480 if (xdst->xfrm_genid != dst->xfrm->genid)
75481 return 0;
75482 if (xdst->num_pols > 0 &&
75483 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75484 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75485 return 0;
75486
75487 mtu = dst_mtu(dst->child);
75488 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75489 sizeof(pol->xfrm_vec[i].saddr));
75490 pol->xfrm_vec[i].encap_family = mp->new_family;
75491 /* flush bundles */
75492 - atomic_inc(&pol->genid);
75493 + atomic_inc_unchecked(&pol->genid);
75494 }
75495 }
75496
75497 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75498 index d2b366c..51ff91e 100644
75499 --- a/scripts/Makefile.build
75500 +++ b/scripts/Makefile.build
75501 @@ -109,7 +109,7 @@ endif
75502 endif
75503
75504 # Do not include host rules unless needed
75505 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75506 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75507 include scripts/Makefile.host
75508 endif
75509
75510 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75511 index 686cb0d..9d653bf 100644
75512 --- a/scripts/Makefile.clean
75513 +++ b/scripts/Makefile.clean
75514 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75515 __clean-files := $(extra-y) $(always) \
75516 $(targets) $(clean-files) \
75517 $(host-progs) \
75518 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75519 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75520 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75521
75522 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75523
75524 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75525 index 1ac414f..a1c1451 100644
75526 --- a/scripts/Makefile.host
75527 +++ b/scripts/Makefile.host
75528 @@ -31,6 +31,7 @@
75529 # Note: Shared libraries consisting of C++ files are not supported
75530
75531 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75532 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75533
75534 # C code
75535 # Executables compiled from a single .c file
75536 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
75537 # Shared libaries (only .c supported)
75538 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75539 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75540 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75541 # Remove .so files from "xxx-objs"
75542 host-cobjs := $(filter-out %.so,$(host-cobjs))
75543
75544 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
75545 index cb1f50c..cef2a7c 100644
75546 --- a/scripts/basic/fixdep.c
75547 +++ b/scripts/basic/fixdep.c
75548 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
75549 /*
75550 * Lookup a value in the configuration string.
75551 */
75552 -static int is_defined_config(const char *name, int len, unsigned int hash)
75553 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75554 {
75555 struct item *aux;
75556
75557 @@ -211,10 +211,10 @@ static void clear_config(void)
75558 /*
75559 * Record the use of a CONFIG_* word.
75560 */
75561 -static void use_config(const char *m, int slen)
75562 +static void use_config(const char *m, unsigned int slen)
75563 {
75564 unsigned int hash = strhash(m, slen);
75565 - int c, i;
75566 + unsigned int c, i;
75567
75568 if (is_defined_config(m, slen, hash))
75569 return;
75570 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
75571
75572 static void parse_config_file(const char *map, size_t len)
75573 {
75574 - const int *end = (const int *) (map + len);
75575 + const unsigned int *end = (const unsigned int *) (map + len);
75576 /* start at +1, so that p can never be < map */
75577 - const int *m = (const int *) map + 1;
75578 + const unsigned int *m = (const unsigned int *) map + 1;
75579 const char *p, *q;
75580
75581 for (; m < end; m++) {
75582 @@ -406,7 +406,7 @@ static void print_deps(void)
75583 static void traps(void)
75584 {
75585 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
75586 - int *p = (int *)test;
75587 + unsigned int *p = (unsigned int *)test;
75588
75589 if (*p != INT_CONF) {
75590 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
75591 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
75592 new file mode 100644
75593 index 0000000..8729101
75594 --- /dev/null
75595 +++ b/scripts/gcc-plugin.sh
75596 @@ -0,0 +1,2 @@
75597 +#!/bin/sh
75598 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
75599 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
75600 index f936d1f..a66d95f 100644
75601 --- a/scripts/mod/file2alias.c
75602 +++ b/scripts/mod/file2alias.c
75603 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
75604 unsigned long size, unsigned long id_size,
75605 void *symval)
75606 {
75607 - int i;
75608 + unsigned int i;
75609
75610 if (size % id_size || size < id_size) {
75611 if (cross_build != 0)
75612 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
75613 /* USB is special because the bcdDevice can be matched against a numeric range */
75614 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75615 static void do_usb_entry(struct usb_device_id *id,
75616 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75617 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75618 unsigned char range_lo, unsigned char range_hi,
75619 unsigned char max, struct module *mod)
75620 {
75621 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
75622 {
75623 unsigned int devlo, devhi;
75624 unsigned char chi, clo, max;
75625 - int ndigits;
75626 + unsigned int ndigits;
75627
75628 id->match_flags = TO_NATIVE(id->match_flags);
75629 id->idVendor = TO_NATIVE(id->idVendor);
75630 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
75631 for (i = 0; i < count; i++) {
75632 const char *id = (char *)devs[i].id;
75633 char acpi_id[sizeof(devs[0].id)];
75634 - int j;
75635 + unsigned int j;
75636
75637 buf_printf(&mod->dev_table_buf,
75638 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75639 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
75640
75641 for (j = 0; j < PNP_MAX_DEVICES; j++) {
75642 const char *id = (char *)card->devs[j].id;
75643 - int i2, j2;
75644 + unsigned int i2, j2;
75645 int dup = 0;
75646
75647 if (!id[0])
75648 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
75649 /* add an individual alias for every device entry */
75650 if (!dup) {
75651 char acpi_id[sizeof(card->devs[0].id)];
75652 - int k;
75653 + unsigned int k;
75654
75655 buf_printf(&mod->dev_table_buf,
75656 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75657 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
75658 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75659 char *alias)
75660 {
75661 - int i, j;
75662 + unsigned int i, j;
75663
75664 sprintf(alias, "dmi*");
75665
75666 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
75667 index 2bd594e..d43245e 100644
75668 --- a/scripts/mod/modpost.c
75669 +++ b/scripts/mod/modpost.c
75670 @@ -919,6 +919,7 @@ enum mismatch {
75671 ANY_INIT_TO_ANY_EXIT,
75672 ANY_EXIT_TO_ANY_INIT,
75673 EXPORT_TO_INIT_EXIT,
75674 + DATA_TO_TEXT
75675 };
75676
75677 struct sectioncheck {
75678 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
75679 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75680 .mismatch = EXPORT_TO_INIT_EXIT,
75681 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
75682 +},
75683 +/* Do not reference code from writable data */
75684 +{
75685 + .fromsec = { DATA_SECTIONS, NULL },
75686 + .tosec = { TEXT_SECTIONS, NULL },
75687 + .mismatch = DATA_TO_TEXT
75688 }
75689 };
75690
75691 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
75692 continue;
75693 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75694 continue;
75695 - if (sym->st_value == addr)
75696 - return sym;
75697 /* Find a symbol nearby - addr are maybe negative */
75698 d = sym->st_value - addr;
75699 + if (d == 0)
75700 + return sym;
75701 if (d < 0)
75702 d = addr - sym->st_value;
75703 if (d < distance) {
75704 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
75705 tosym, prl_to, prl_to, tosym);
75706 free(prl_to);
75707 break;
75708 + case DATA_TO_TEXT:
75709 +/*
75710 + fprintf(stderr,
75711 + "The variable %s references\n"
75712 + "the %s %s%s%s\n",
75713 + fromsym, to, sec2annotation(tosec), tosym, to_p);
75714 +*/
75715 + break;
75716 }
75717 fprintf(stderr, "\n");
75718 }
75719 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
75720 static void check_sec_ref(struct module *mod, const char *modname,
75721 struct elf_info *elf)
75722 {
75723 - int i;
75724 + unsigned int i;
75725 Elf_Shdr *sechdrs = elf->sechdrs;
75726
75727 /* Walk through all sections */
75728 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
75729 va_end(ap);
75730 }
75731
75732 -void buf_write(struct buffer *buf, const char *s, int len)
75733 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
75734 {
75735 if (buf->size - buf->pos < len) {
75736 buf->size += len + SZ;
75737 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
75738 if (fstat(fileno(file), &st) < 0)
75739 goto close_write;
75740
75741 - if (st.st_size != b->pos)
75742 + if (st.st_size != (off_t)b->pos)
75743 goto close_write;
75744
75745 tmp = NOFAIL(malloc(b->pos));
75746 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
75747 index 2031119..b5433af 100644
75748 --- a/scripts/mod/modpost.h
75749 +++ b/scripts/mod/modpost.h
75750 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
75751
75752 struct buffer {
75753 char *p;
75754 - int pos;
75755 - int size;
75756 + unsigned int pos;
75757 + unsigned int size;
75758 };
75759
75760 void __attribute__((format(printf, 2, 3)))
75761 buf_printf(struct buffer *buf, const char *fmt, ...);
75762
75763 void
75764 -buf_write(struct buffer *buf, const char *s, int len);
75765 +buf_write(struct buffer *buf, const char *s, unsigned int len);
75766
75767 struct module {
75768 struct module *next;
75769 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
75770 index 9dfcd6d..099068e 100644
75771 --- a/scripts/mod/sumversion.c
75772 +++ b/scripts/mod/sumversion.c
75773 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
75774 goto out;
75775 }
75776
75777 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75778 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75779 warn("writing sum in %s failed: %s\n",
75780 filename, strerror(errno));
75781 goto out;
75782 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
75783 index 5c11312..72742b5 100644
75784 --- a/scripts/pnmtologo.c
75785 +++ b/scripts/pnmtologo.c
75786 @@ -237,14 +237,14 @@ static void write_header(void)
75787 fprintf(out, " * Linux logo %s\n", logoname);
75788 fputs(" */\n\n", out);
75789 fputs("#include <linux/linux_logo.h>\n\n", out);
75790 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75791 + fprintf(out, "static unsigned char %s_data[] = {\n",
75792 logoname);
75793 }
75794
75795 static void write_footer(void)
75796 {
75797 fputs("\n};\n\n", out);
75798 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75799 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
75800 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75801 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75802 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75803 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75804 fputs("\n};\n\n", out);
75805
75806 /* write logo clut */
75807 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75808 + fprintf(out, "static unsigned char %s_clut[] = {\n",
75809 logoname);
75810 write_hex_cnt = 0;
75811 for (i = 0; i < logo_clutsize; i++) {
75812 diff --git a/security/Kconfig b/security/Kconfig
75813 index 51bd5a0..eeabc9f 100644
75814 --- a/security/Kconfig
75815 +++ b/security/Kconfig
75816 @@ -4,6 +4,627 @@
75817
75818 menu "Security options"
75819
75820 +source grsecurity/Kconfig
75821 +
75822 +menu "PaX"
75823 +
75824 + config ARCH_TRACK_EXEC_LIMIT
75825 + bool
75826 +
75827 + config PAX_KERNEXEC_PLUGIN
75828 + bool
75829 +
75830 + config PAX_PER_CPU_PGD
75831 + bool
75832 +
75833 + config TASK_SIZE_MAX_SHIFT
75834 + int
75835 + depends on X86_64
75836 + default 47 if !PAX_PER_CPU_PGD
75837 + default 42 if PAX_PER_CPU_PGD
75838 +
75839 + config PAX_ENABLE_PAE
75840 + bool
75841 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
75842 +
75843 +config PAX
75844 + bool "Enable various PaX features"
75845 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
75846 + help
75847 + This allows you to enable various PaX features. PaX adds
75848 + intrusion prevention mechanisms to the kernel that reduce
75849 + the risks posed by exploitable memory corruption bugs.
75850 +
75851 +menu "PaX Control"
75852 + depends on PAX
75853 +
75854 +config PAX_SOFTMODE
75855 + bool 'Support soft mode'
75856 + help
75857 + Enabling this option will allow you to run PaX in soft mode, that
75858 + is, PaX features will not be enforced by default, only on executables
75859 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
75860 + support as they are the only way to mark executables for soft mode use.
75861 +
75862 + Soft mode can be activated by using the "pax_softmode=1" kernel command
75863 + line option on boot. Furthermore you can control various PaX features
75864 + at runtime via the entries in /proc/sys/kernel/pax.
75865 +
75866 +config PAX_EI_PAX
75867 + bool 'Use legacy ELF header marking'
75868 + help
75869 + Enabling this option will allow you to control PaX features on
75870 + a per executable basis via the 'chpax' utility available at
75871 + http://pax.grsecurity.net/. The control flags will be read from
75872 + an otherwise reserved part of the ELF header. This marking has
75873 + numerous drawbacks (no support for soft-mode, toolchain does not
75874 + know about the non-standard use of the ELF header) therefore it
75875 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
75876 + support.
75877 +
75878 + If you have applications not marked by the PT_PAX_FLAGS ELF program
75879 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
75880 + option otherwise they will not get any protection.
75881 +
75882 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
75883 + support as well, they will override the legacy EI_PAX marks.
75884 +
75885 +config PAX_PT_PAX_FLAGS
75886 + bool 'Use ELF program header marking'
75887 + help
75888 + Enabling this option will allow you to control PaX features on
75889 + a per executable basis via the 'paxctl' utility available at
75890 + http://pax.grsecurity.net/. The control flags will be read from
75891 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
75892 + has the benefits of supporting both soft mode and being fully
75893 + integrated into the toolchain (the binutils patch is available
75894 + from http://pax.grsecurity.net).
75895 +
75896 + If you have applications not marked by the PT_PAX_FLAGS ELF program
75897 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
75898 + support otherwise they will not get any protection.
75899 +
75900 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
75901 + must make sure that the marks are the same if a binary has both marks.
75902 +
75903 + Note that if you enable the legacy EI_PAX marking support as well,
75904 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
75905 +
75906 +config PAX_XATTR_PAX_FLAGS
75907 + bool 'Use filesystem extended attributes marking'
75908 + depends on EXPERT
75909 + select CIFS_XATTR if CIFS
75910 + select EXT2_FS_XATTR if EXT2_FS
75911 + select EXT3_FS_XATTR if EXT3_FS
75912 + select EXT4_FS_XATTR if EXT4_FS
75913 + select JFFS2_FS_XATTR if JFFS2_FS
75914 + select REISERFS_FS_XATTR if REISERFS_FS
75915 + select SQUASHFS_XATTR if SQUASHFS
75916 + select TMPFS_XATTR if TMPFS
75917 + select UBIFS_FS_XATTR if UBIFS_FS
75918 + help
75919 + Enabling this option will allow you to control PaX features on
75920 + a per executable basis via the 'setfattr' utility. The control
75921 + flags will be read from the user.pax.flags extended attribute of
75922 + the file. This marking has the benefit of supporting binary-only
75923 + applications that self-check themselves (e.g., skype) and would
75924 + not tolerate chpax/paxctl changes. The main drawback is that
75925 + extended attributes are not supported by some filesystems (e.g.,
75926 + isofs, udf, vfat) so copying files through such filesystems will
75927 + lose the extended attributes and these PaX markings.
75928 +
75929 + If you have applications not marked by the PT_PAX_FLAGS ELF program
75930 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
75931 + support otherwise they will not get any protection.
75932 +
75933 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
75934 + must make sure that the marks are the same if a binary has both marks.
75935 +
75936 + Note that if you enable the legacy EI_PAX marking support as well,
75937 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
75938 +
75939 +choice
75940 + prompt 'MAC system integration'
75941 + default PAX_HAVE_ACL_FLAGS
75942 + help
75943 + Mandatory Access Control systems have the option of controlling
75944 + PaX flags on a per executable basis, choose the method supported
75945 + by your particular system.
75946 +
75947 + - "none": if your MAC system does not interact with PaX,
75948 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
75949 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
75950 +
75951 + NOTE: this option is for developers/integrators only.
75952 +
75953 + config PAX_NO_ACL_FLAGS
75954 + bool 'none'
75955 +
75956 + config PAX_HAVE_ACL_FLAGS
75957 + bool 'direct'
75958 +
75959 + config PAX_HOOK_ACL_FLAGS
75960 + bool 'hook'
75961 +endchoice
75962 +
75963 +endmenu
75964 +
75965 +menu "Non-executable pages"
75966 + depends on PAX
75967 +
75968 +config PAX_NOEXEC
75969 + bool "Enforce non-executable pages"
75970 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
75971 + help
75972 + By design some architectures do not allow for protecting memory
75973 + pages against execution or even if they do, Linux does not make
75974 + use of this feature. In practice this means that if a page is
75975 + readable (such as the stack or heap) it is also executable.
75976 +
75977 + There is a well known exploit technique that makes use of this
75978 + fact and a common programming mistake where an attacker can
75979 + introduce code of his choice somewhere in the attacked program's
75980 + memory (typically the stack or the heap) and then execute it.
75981 +
75982 + If the attacked program was running with different (typically
75983 + higher) privileges than that of the attacker, then he can elevate
75984 + his own privilege level (e.g. get a root shell, write to files for
75985 + which he does not have write access, etc).
75986 +
75987 + Enabling this option will let you choose from various features
75988 + that prevent the injection and execution of 'foreign' code in
75989 + a program.
75990 +
75991 + This will also break programs that rely on the old behaviour and
75992 + expect that dynamically allocated memory via the malloc() family
75993 + of functions is executable (which it is not). Notable examples
75994 + are the XFree86 4.x server, the java runtime and wine.
75995 +
75996 +config PAX_PAGEEXEC
75997 + bool "Paging based non-executable pages"
75998 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
75999 + select S390_SWITCH_AMODE if S390
76000 + select S390_EXEC_PROTECT if S390
76001 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76002 + help
76003 + This implementation is based on the paging feature of the CPU.
76004 + On i386 without hardware non-executable bit support there is a
76005 + variable but usually low performance impact, however on Intel's
76006 + P4 core based CPUs it is very high so you should not enable this
76007 + for kernels meant to be used on such CPUs.
76008 +
76009 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76010 + with hardware non-executable bit support there is no performance
76011 + impact, on ppc the impact is negligible.
76012 +
76013 + Note that several architectures require various emulations due to
76014 + badly designed userland ABIs, this will cause a performance impact
76015 + but will disappear as soon as userland is fixed. For example, ppc
76016 + userland MUST have been built with secure-plt by a recent toolchain.
76017 +
76018 +config PAX_SEGMEXEC
76019 + bool "Segmentation based non-executable pages"
76020 + depends on PAX_NOEXEC && X86_32
76021 + help
76022 + This implementation is based on the segmentation feature of the
76023 + CPU and has a very small performance impact, however applications
76024 + will be limited to a 1.5 GB address space instead of the normal
76025 + 3 GB.
76026 +
76027 +config PAX_EMUTRAMP
76028 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76029 + default y if PARISC
76030 + help
76031 + There are some programs and libraries that for one reason or
76032 + another attempt to execute special small code snippets from
76033 + non-executable memory pages. Most notable examples are the
76034 + signal handler return code generated by the kernel itself and
76035 + the GCC trampolines.
76036 +
76037 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76038 + such programs will no longer work under your kernel.
76039 +
76040 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76041 + utilities to enable trampoline emulation for the affected programs
76042 + yet still have the protection provided by the non-executable pages.
76043 +
76044 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76045 + your system will not even boot.
76046 +
76047 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76048 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76049 + for the affected files.
76050 +
76051 + NOTE: enabling this feature *may* open up a loophole in the
76052 + protection provided by non-executable pages that an attacker
76053 + could abuse. Therefore the best solution is to not have any
76054 + files on your system that would require this option. This can
76055 + be achieved by not using libc5 (which relies on the kernel
76056 + signal handler return code) and not using or rewriting programs
76057 + that make use of the nested function implementation of GCC.
76058 + Skilled users can just fix GCC itself so that it implements
76059 + nested function calls in a way that does not interfere with PaX.
76060 +
76061 +config PAX_EMUSIGRT
76062 + bool "Automatically emulate sigreturn trampolines"
76063 + depends on PAX_EMUTRAMP && PARISC
76064 + default y
76065 + help
76066 + Enabling this option will have the kernel automatically detect
76067 + and emulate signal return trampolines executing on the stack
76068 + that would otherwise lead to task termination.
76069 +
76070 + This solution is intended as a temporary one for users with
76071 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76072 + Modula-3 runtime, etc) or executables linked to such, basically
76073 + everything that does not specify its own SA_RESTORER function in
76074 + normal executable memory like glibc 2.1+ does.
76075 +
76076 + On parisc you MUST enable this option, otherwise your system will
76077 + not even boot.
76078 +
76079 + NOTE: this feature cannot be disabled on a per executable basis
76080 + and since it *does* open up a loophole in the protection provided
76081 + by non-executable pages, the best solution is to not have any
76082 + files on your system that would require this option.
76083 +
76084 +config PAX_MPROTECT
76085 + bool "Restrict mprotect()"
76086 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76087 + help
76088 + Enabling this option will prevent programs from
76089 + - changing the executable status of memory pages that were
76090 + not originally created as executable,
76091 + - making read-only executable pages writable again,
76092 + - creating executable pages from anonymous memory,
76093 + - making read-only-after-relocations (RELRO) data pages writable again.
76094 +
76095 + You should say Y here to complete the protection provided by
76096 + the enforcement of non-executable pages.
76097 +
76098 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76099 + this feature on a per file basis.
76100 +
76101 +config PAX_MPROTECT_COMPAT
76102 + bool "Use legacy/compat protection demoting (read help)"
76103 + depends on PAX_MPROTECT
76104 + default n
76105 + help
76106 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76107 + by sending the proper error code to the application. For some broken
76108 + userland, this can cause problems with Python or other applications. The
76109 + current implementation however allows for applications like clamav to
76110 + detect if JIT compilation/execution is allowed and to fall back gracefully
76111 + to an interpreter-based mode if it does not. While we encourage everyone
76112 + to use the current implementation as-is and push upstream to fix broken
76113 + userland (note that the RWX logging option can assist with this), in some
76114 + environments this may not be possible. Having to disable MPROTECT
76115 + completely on certain binaries reduces the security benefit of PaX,
76116 + so this option is provided for those environments to revert to the old
76117 + behavior.
76118 +
76119 +config PAX_ELFRELOCS
76120 + bool "Allow ELF text relocations (read help)"
76121 + depends on PAX_MPROTECT
76122 + default n
76123 + help
76124 + Non-executable pages and mprotect() restrictions are effective
76125 + in preventing the introduction of new executable code into an
76126 + attacked task's address space. There remain only two venues
76127 + for this kind of attack: if the attacker can execute already
76128 + existing code in the attacked task then he can either have it
76129 + create and mmap() a file containing his code or have it mmap()
76130 + an already existing ELF library that does not have position
76131 + independent code in it and use mprotect() on it to make it
76132 + writable and copy his code there. While protecting against
76133 + the former approach is beyond PaX, the latter can be prevented
76134 + by having only PIC ELF libraries on one's system (which do not
76135 + need to relocate their code). If you are sure this is your case,
76136 + as is the case with all modern Linux distributions, then leave
76137 + this option disabled. You should say 'n' here.
76138 +
76139 +config PAX_ETEXECRELOCS
76140 + bool "Allow ELF ET_EXEC text relocations"
76141 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76142 + select PAX_ELFRELOCS
76143 + default y
76144 + help
76145 + On some architectures there are incorrectly created applications
76146 + that require text relocations and would not work without enabling
76147 + this option. If you are an alpha, ia64 or parisc user, you should
76148 + enable this option and disable it once you have made sure that
76149 + none of your applications need it.
76150 +
76151 +config PAX_EMUPLT
76152 + bool "Automatically emulate ELF PLT"
76153 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76154 + default y
76155 + help
76156 + Enabling this option will have the kernel automatically detect
76157 + and emulate the Procedure Linkage Table entries in ELF files.
76158 + On some architectures such entries are in writable memory, and
76159 + become non-executable leading to task termination. Therefore
76160 + it is mandatory that you enable this option on alpha, parisc,
76161 + sparc and sparc64, otherwise your system would not even boot.
76162 +
76163 + NOTE: this feature *does* open up a loophole in the protection
76164 + provided by the non-executable pages, therefore the proper
76165 + solution is to modify the toolchain to produce a PLT that does
76166 + not need to be writable.
76167 +
76168 +config PAX_DLRESOLVE
76169 + bool 'Emulate old glibc resolver stub'
76170 + depends on PAX_EMUPLT && SPARC
76171 + default n
76172 + help
76173 + This option is needed if userland has an old glibc (before 2.4)
76174 + that puts a 'save' instruction into the runtime generated resolver
76175 + stub that needs special emulation.
76176 +
76177 +config PAX_KERNEXEC
76178 + bool "Enforce non-executable kernel pages"
76179 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76180 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76181 + select PAX_KERNEXEC_PLUGIN if X86_64
76182 + help
76183 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76184 + that is, enabling this option will make it harder to inject
76185 + and execute 'foreign' code in kernel memory itself.
76186 +
76187 + Note that on x86_64 kernels there is a known regression when
76188 + this feature and KVM/VMX are both enabled in the host kernel.
76189 +
76190 +choice
76191 + prompt "Return Address Instrumentation Method"
76192 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76193 + depends on PAX_KERNEXEC_PLUGIN
76194 + help
76195 + Select the method used to instrument function pointer dereferences.
76196 + Note that binary modules cannot be instrumented by this approach.
76197 +
76198 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76199 + bool "bts"
76200 + help
76201 + This method is compatible with binary only modules but has
76202 + a higher runtime overhead.
76203 +
76204 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76205 + bool "or"
76206 + depends on !PARAVIRT
76207 + help
76208 + This method is incompatible with binary only modules but has
76209 + a lower runtime overhead.
76210 +endchoice
76211 +
76212 +config PAX_KERNEXEC_PLUGIN_METHOD
76213 + string
76214 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76215 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76216 + default ""
76217 +
76218 +config PAX_KERNEXEC_MODULE_TEXT
76219 + int "Minimum amount of memory reserved for module code"
76220 + default "4"
76221 + depends on PAX_KERNEXEC && X86_32 && MODULES
76222 + help
76223 + Due to implementation details the kernel must reserve a fixed
76224 + amount of memory for module code at compile time that cannot be
76225 + changed at runtime. Here you can specify the minimum amount
76226 + in MB that will be reserved. Due to the same implementation
76227 + details this size will always be rounded up to the next 2/4 MB
76228 + boundary (depends on PAE) so the actually available memory for
76229 + module code will usually be more than this minimum.
76230 +
76231 + The default 4 MB should be enough for most users but if you have
76232 + an excessive number of modules (e.g., most distribution configs
76233 + compile many drivers as modules) or use huge modules such as
76234 + nvidia's kernel driver, you will need to adjust this amount.
76235 + A good rule of thumb is to look at your currently loaded kernel
76236 + modules and add up their sizes.
76237 +
76238 +endmenu
76239 +
76240 +menu "Address Space Layout Randomization"
76241 + depends on PAX
76242 +
76243 +config PAX_ASLR
76244 + bool "Address Space Layout Randomization"
76245 + help
76246 + Many if not most exploit techniques rely on the knowledge of
76247 + certain addresses in the attacked program. The following options
76248 + will allow the kernel to apply a certain amount of randomization
76249 + to specific parts of the program thereby forcing an attacker to
76250 + guess them in most cases. Any failed guess will most likely crash
76251 + the attacked program which allows the kernel to detect such attempts
76252 + and react on them. PaX itself provides no reaction mechanisms,
76253 + instead it is strongly encouraged that you make use of Nergal's
76254 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76255 + (http://www.grsecurity.net/) built-in crash detection features or
76256 + develop one yourself.
76257 +
76258 + By saying Y here you can choose to randomize the following areas:
76259 + - top of the task's kernel stack
76260 + - top of the task's userland stack
76261 + - base address for mmap() requests that do not specify one
76262 + (this includes all libraries)
76263 + - base address of the main executable
76264 +
76265 + It is strongly recommended to say Y here as address space layout
76266 + randomization has negligible impact on performance yet it provides
76267 + a very effective protection.
76268 +
76269 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76270 + this feature on a per file basis.
76271 +
76272 +config PAX_RANDKSTACK
76273 + bool "Randomize kernel stack base"
76274 + depends on X86_TSC && X86
76275 + help
76276 + By saying Y here the kernel will randomize every task's kernel
76277 + stack on every system call. This will not only force an attacker
76278 + to guess it but also prevent him from making use of possible
76279 + leaked information about it.
76280 +
76281 + Since the kernel stack is a rather scarce resource, randomization
76282 + may cause unexpected stack overflows, therefore you should very
76283 + carefully test your system. Note that once enabled in the kernel
76284 + configuration, this feature cannot be disabled on a per file basis.
76285 +
76286 +config PAX_RANDUSTACK
76287 + bool "Randomize user stack base"
76288 + depends on PAX_ASLR
76289 + help
76290 + By saying Y here the kernel will randomize every task's userland
76291 + stack. The randomization is done in two steps where the second
76292 + one may apply a big amount of shift to the top of the stack and
76293 + cause problems for programs that want to use lots of memory (more
76294 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76295 + For this reason the second step can be controlled by 'chpax' or
76296 + 'paxctl' on a per file basis.
76297 +
76298 +config PAX_RANDMMAP
76299 + bool "Randomize mmap() base"
76300 + depends on PAX_ASLR
76301 + help
76302 + By saying Y here the kernel will use a randomized base address for
76303 + mmap() requests that do not specify one themselves. As a result
76304 + all dynamically loaded libraries will appear at random addresses
76305 + and therefore be harder to exploit by a technique where an attacker
76306 + attempts to execute library code for his purposes (e.g. spawn a
76307 + shell from an exploited program that is running at an elevated
76308 + privilege level).
76309 +
76310 + Furthermore, if a program is relinked as a dynamic ELF file, its
76311 + base address will be randomized as well, completing the full
76312 + randomization of the address space layout. Attacking such programs
76313 + becomes a guess game. You can find an example of doing this at
76314 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76315 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76316 +
76317 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76318 + feature on a per file basis.
76319 +
76320 +endmenu
76321 +
76322 +menu "Miscellaneous hardening features"
76323 +
76324 +config PAX_MEMORY_SANITIZE
76325 + bool "Sanitize all freed memory"
76326 + depends on !HIBERNATION
76327 + help
76328 + By saying Y here the kernel will erase memory pages as soon as they
76329 + are freed. This in turn reduces the lifetime of data stored in the
76330 + pages, making it less likely that sensitive information such as
76331 + passwords, cryptographic secrets, etc stay in memory for too long.
76332 +
76333 + This is especially useful for programs whose runtime is short, long
76334 + lived processes and the kernel itself benefit from this as long as
76335 + they operate on whole memory pages and ensure timely freeing of pages
76336 + that may hold sensitive information.
76337 +
76338 + The tradeoff is performance impact, on a single CPU system kernel
76339 + compilation sees a 3% slowdown, other systems and workloads may vary
76340 + and you are advised to test this feature on your expected workload
76341 + before deploying it.
76342 +
76343 + Note that this feature does not protect data stored in live pages,
76344 + e.g., process memory swapped to disk may stay there for a long time.
76345 +
76346 +config PAX_MEMORY_STACKLEAK
76347 + bool "Sanitize kernel stack"
76348 + depends on X86
76349 + help
76350 + By saying Y here the kernel will erase the kernel stack before it
76351 + returns from a system call. This in turn reduces the information
76352 + that a kernel stack leak bug can reveal.
76353 +
76354 + Note that such a bug can still leak information that was put on
76355 + the stack by the current system call (the one eventually triggering
76356 + the bug) but traces of earlier system calls on the kernel stack
76357 + cannot leak anymore.
76358 +
76359 + The tradeoff is performance impact: on a single CPU system kernel
76360 + compilation sees a 1% slowdown, other systems and workloads may vary
76361 + and you are advised to test this feature on your expected workload
76362 + before deploying it.
76363 +
76364 + Note: full support for this feature requires gcc with plugin support
76365 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76366 + versions means that functions with large enough stack frames may
76367 + leave uninitialized memory behind that may be exposed to a later
76368 + syscall leaking the stack.
76369 +
76370 +config PAX_MEMORY_UDEREF
76371 + bool "Prevent invalid userland pointer dereference"
76372 + depends on X86 && !UML_X86 && !XEN
76373 + select PAX_PER_CPU_PGD if X86_64
76374 + help
76375 + By saying Y here the kernel will be prevented from dereferencing
76376 + userland pointers in contexts where the kernel expects only kernel
76377 + pointers. This is both a useful runtime debugging feature and a
76378 + security measure that prevents exploiting a class of kernel bugs.
76379 +
76380 + The tradeoff is that some virtualization solutions may experience
76381 + a huge slowdown and therefore you should not enable this feature
76382 + for kernels meant to run in such environments. Whether a given VM
76383 + solution is affected or not is best determined by simply trying it
76384 + out, the performance impact will be obvious right on boot as this
76385 + mechanism engages from very early on. A good rule of thumb is that
76386 + VMs running on CPUs without hardware virtualization support (i.e.,
76387 + the majority of IA-32 CPUs) will likely experience the slowdown.
76388 +
76389 +config PAX_REFCOUNT
76390 + bool "Prevent various kernel object reference counter overflows"
76391 + depends on GRKERNSEC && (X86 || SPARC64)
76392 + help
76393 + By saying Y here the kernel will detect and prevent overflowing
76394 + various (but not all) kinds of object reference counters. Such
76395 + overflows can normally occur due to bugs only and are often, if
76396 + not always, exploitable.
76397 +
76398 + The tradeoff is that data structures protected by an overflowed
76399 + refcount will never be freed and therefore will leak memory. Note
76400 + that this leak also happens even without this protection but in
76401 + that case the overflow can eventually trigger the freeing of the
76402 + data structure while it is still being used elsewhere, resulting
76403 + in the exploitable situation that this feature prevents.
76404 +
76405 + Since this has a negligible performance impact, you should enable
76406 + this feature.
76407 +
76408 +config PAX_USERCOPY
76409 + bool "Harden heap object copies between kernel and userland"
76410 + depends on X86 || PPC || SPARC || ARM
76411 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76412 + help
76413 + By saying Y here the kernel will enforce the size of heap objects
76414 + when they are copied in either direction between the kernel and
76415 + userland, even if only a part of the heap object is copied.
76416 +
76417 + Specifically, this checking prevents information leaking from the
76418 + kernel heap during kernel to userland copies (if the kernel heap
76419 + object is otherwise fully initialized) and prevents kernel heap
76420 + overflows during userland to kernel copies.
76421 +
76422 + Note that the current implementation provides the strictest bounds
76423 + checks for the SLUB allocator.
76424 +
76425 + Enabling this option also enables per-slab cache protection against
76426 + data in a given cache being copied into/out of via userland
76427 + accessors. Though the whitelist of regions will be reduced over
76428 + time, it notably protects important data structures like task structs.
76429 +
76430 + If frame pointers are enabled on x86, this option will also restrict
76431 + copies into and out of the kernel stack to local variables within a
76432 + single frame.
76433 +
76434 + Since this has a negligible performance impact, you should enable
76435 + this feature.
76436 +
76437 +endmenu
76438 +
76439 +endmenu
76440 +
76441 config KEYS
76442 bool "Enable access key retention support"
76443 help
76444 @@ -169,7 +790,7 @@ config INTEL_TXT
76445 config LSM_MMAP_MIN_ADDR
76446 int "Low address space for LSM to protect from user allocation"
76447 depends on SECURITY && SECURITY_SELINUX
76448 - default 32768 if ARM
76449 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76450 default 65536
76451 help
76452 This is the portion of low virtual memory which should be protected
76453 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76454 index 3783202..1852837 100644
76455 --- a/security/apparmor/lsm.c
76456 +++ b/security/apparmor/lsm.c
76457 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76458 return error;
76459 }
76460
76461 -static struct security_operations apparmor_ops = {
76462 +static struct security_operations apparmor_ops __read_only = {
76463 .name = "apparmor",
76464
76465 .ptrace_access_check = apparmor_ptrace_access_check,
76466 diff --git a/security/commoncap.c b/security/commoncap.c
76467 index ee4f848..a320c64 100644
76468 --- a/security/commoncap.c
76469 +++ b/security/commoncap.c
76470 @@ -28,6 +28,7 @@
76471 #include <linux/prctl.h>
76472 #include <linux/securebits.h>
76473 #include <linux/user_namespace.h>
76474 +#include <net/sock.h>
76475
76476 /*
76477 * If a non-root user executes a setuid-root binary in
76478 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76479
76480 int cap_netlink_recv(struct sk_buff *skb, int cap)
76481 {
76482 - if (!cap_raised(current_cap(), cap))
76483 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76484 return -EPERM;
76485 return 0;
76486 }
76487 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76488 {
76489 const struct cred *cred = current_cred();
76490
76491 + if (gr_acl_enable_at_secure())
76492 + return 1;
76493 +
76494 if (cred->uid != 0) {
76495 if (bprm->cap_effective)
76496 return 1;
76497 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76498 index 3ccf7ac..d73ad64 100644
76499 --- a/security/integrity/ima/ima.h
76500 +++ b/security/integrity/ima/ima.h
76501 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76502 extern spinlock_t ima_queue_lock;
76503
76504 struct ima_h_table {
76505 - atomic_long_t len; /* number of stored measurements in the list */
76506 - atomic_long_t violations;
76507 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76508 + atomic_long_unchecked_t violations;
76509 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76510 };
76511 extern struct ima_h_table ima_htable;
76512 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76513 index 88a2788..581ab92 100644
76514 --- a/security/integrity/ima/ima_api.c
76515 +++ b/security/integrity/ima/ima_api.c
76516 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76517 int result;
76518
76519 /* can overflow, only indicator */
76520 - atomic_long_inc(&ima_htable.violations);
76521 + atomic_long_inc_unchecked(&ima_htable.violations);
76522
76523 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76524 if (!entry) {
76525 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76526 index c5c5a72..2ad942f 100644
76527 --- a/security/integrity/ima/ima_audit.c
76528 +++ b/security/integrity/ima/ima_audit.c
76529 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76530 audit_log_format(ab, " name=");
76531 audit_log_untrustedstring(ab, fname);
76532 }
76533 - if (inode)
76534 - audit_log_format(ab, " dev=%s ino=%lu",
76535 - inode->i_sb->s_id, inode->i_ino);
76536 + if (inode) {
76537 + audit_log_format(ab, " dev=");
76538 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76539 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76540 + }
76541 audit_log_format(ab, " res=%d", !result ? 0 : 1);
76542 audit_log_end(ab);
76543 }
76544 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
76545 index e1aa2b4..52027bf 100644
76546 --- a/security/integrity/ima/ima_fs.c
76547 +++ b/security/integrity/ima/ima_fs.c
76548 @@ -28,12 +28,12 @@
76549 static int valid_policy = 1;
76550 #define TMPBUFLEN 12
76551 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
76552 - loff_t *ppos, atomic_long_t *val)
76553 + loff_t *ppos, atomic_long_unchecked_t *val)
76554 {
76555 char tmpbuf[TMPBUFLEN];
76556 ssize_t len;
76557
76558 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
76559 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
76560 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
76561 }
76562
76563 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
76564 index 55a6271..ad829c3 100644
76565 --- a/security/integrity/ima/ima_queue.c
76566 +++ b/security/integrity/ima/ima_queue.c
76567 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
76568 INIT_LIST_HEAD(&qe->later);
76569 list_add_tail_rcu(&qe->later, &ima_measurements);
76570
76571 - atomic_long_inc(&ima_htable.len);
76572 + atomic_long_inc_unchecked(&ima_htable.len);
76573 key = ima_hash_key(entry->digest);
76574 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
76575 return 0;
76576 diff --git a/security/keys/compat.c b/security/keys/compat.c
76577 index 4c48e13..7abdac9 100644
76578 --- a/security/keys/compat.c
76579 +++ b/security/keys/compat.c
76580 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
76581 if (ret == 0)
76582 goto no_payload_free;
76583
76584 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76585 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76586
76587 if (iov != iovstack)
76588 kfree(iov);
76589 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
76590 index 0b3f5d7..892c8a6 100644
76591 --- a/security/keys/keyctl.c
76592 +++ b/security/keys/keyctl.c
76593 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
76594 /*
76595 * Copy the iovec data from userspace
76596 */
76597 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76598 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
76599 unsigned ioc)
76600 {
76601 for (; ioc > 0; ioc--) {
76602 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76603 * If successful, 0 will be returned.
76604 */
76605 long keyctl_instantiate_key_common(key_serial_t id,
76606 - const struct iovec *payload_iov,
76607 + const struct iovec __user *payload_iov,
76608 unsigned ioc,
76609 size_t plen,
76610 key_serial_t ringid)
76611 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
76612 [0].iov_len = plen
76613 };
76614
76615 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
76616 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
76617 }
76618
76619 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
76620 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
76621 if (ret == 0)
76622 goto no_payload_free;
76623
76624 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76625 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76626
76627 if (iov != iovstack)
76628 kfree(iov);
76629 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
76630 index 37a7f3b..86dc19f 100644
76631 --- a/security/keys/keyring.c
76632 +++ b/security/keys/keyring.c
76633 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
76634 ret = -EFAULT;
76635
76636 for (loop = 0; loop < klist->nkeys; loop++) {
76637 + key_serial_t serial;
76638 key = klist->keys[loop];
76639 + serial = key->serial;
76640
76641 tmp = sizeof(key_serial_t);
76642 if (tmp > buflen)
76643 tmp = buflen;
76644
76645 - if (copy_to_user(buffer,
76646 - &key->serial,
76647 - tmp) != 0)
76648 + if (copy_to_user(buffer, &serial, tmp))
76649 goto error;
76650
76651 buflen -= tmp;
76652 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
76653 index 893af8a..ba9237c 100644
76654 --- a/security/lsm_audit.c
76655 +++ b/security/lsm_audit.c
76656 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76657 audit_log_d_path(ab, "path=", &a->u.path);
76658
76659 inode = a->u.path.dentry->d_inode;
76660 - if (inode)
76661 - audit_log_format(ab, " dev=%s ino=%lu",
76662 - inode->i_sb->s_id,
76663 - inode->i_ino);
76664 + if (inode) {
76665 + audit_log_format(ab, " dev=");
76666 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76667 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76668 + }
76669 break;
76670 }
76671 case LSM_AUDIT_DATA_DENTRY: {
76672 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76673 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
76674
76675 inode = a->u.dentry->d_inode;
76676 - if (inode)
76677 - audit_log_format(ab, " dev=%s ino=%lu",
76678 - inode->i_sb->s_id,
76679 - inode->i_ino);
76680 + if (inode) {
76681 + audit_log_format(ab, " dev=");
76682 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76683 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76684 + }
76685 break;
76686 }
76687 case LSM_AUDIT_DATA_INODE: {
76688 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76689 dentry->d_name.name);
76690 dput(dentry);
76691 }
76692 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
76693 - inode->i_ino);
76694 + audit_log_format(ab, " dev=");
76695 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76696 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76697 break;
76698 }
76699 case LSM_AUDIT_DATA_TASK:
76700 diff --git a/security/min_addr.c b/security/min_addr.c
76701 index f728728..6457a0c 100644
76702 --- a/security/min_addr.c
76703 +++ b/security/min_addr.c
76704 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
76705 */
76706 static void update_mmap_min_addr(void)
76707 {
76708 +#ifndef SPARC
76709 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
76710 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
76711 mmap_min_addr = dac_mmap_min_addr;
76712 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
76713 #else
76714 mmap_min_addr = dac_mmap_min_addr;
76715 #endif
76716 +#endif
76717 }
76718
76719 /*
76720 diff --git a/security/security.c b/security/security.c
76721 index e2f684a..8d62ef5 100644
76722 --- a/security/security.c
76723 +++ b/security/security.c
76724 @@ -26,8 +26,8 @@
76725 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
76726 CONFIG_DEFAULT_SECURITY;
76727
76728 -static struct security_operations *security_ops;
76729 -static struct security_operations default_security_ops = {
76730 +static struct security_operations *security_ops __read_only;
76731 +static struct security_operations default_security_ops __read_only = {
76732 .name = "default",
76733 };
76734
76735 @@ -68,7 +68,9 @@ int __init security_init(void)
76736
76737 void reset_security_ops(void)
76738 {
76739 + pax_open_kernel();
76740 security_ops = &default_security_ops;
76741 + pax_close_kernel();
76742 }
76743
76744 /* Save user chosen LSM */
76745 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
76746 index 1126c10..effb32b 100644
76747 --- a/security/selinux/hooks.c
76748 +++ b/security/selinux/hooks.c
76749 @@ -94,8 +94,6 @@
76750
76751 #define NUM_SEL_MNT_OPTS 5
76752
76753 -extern struct security_operations *security_ops;
76754 -
76755 /* SECMARK reference count */
76756 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
76757
76758 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
76759
76760 #endif
76761
76762 -static struct security_operations selinux_ops = {
76763 +static struct security_operations selinux_ops __read_only = {
76764 .name = "selinux",
76765
76766 .ptrace_access_check = selinux_ptrace_access_check,
76767 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
76768 index b43813c..74be837 100644
76769 --- a/security/selinux/include/xfrm.h
76770 +++ b/security/selinux/include/xfrm.h
76771 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
76772
76773 static inline void selinux_xfrm_notify_policyload(void)
76774 {
76775 - atomic_inc(&flow_cache_genid);
76776 + atomic_inc_unchecked(&flow_cache_genid);
76777 }
76778 #else
76779 static inline int selinux_xfrm_enabled(void)
76780 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
76781 index 7db62b4..ee4d949 100644
76782 --- a/security/smack/smack_lsm.c
76783 +++ b/security/smack/smack_lsm.c
76784 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
76785 return 0;
76786 }
76787
76788 -struct security_operations smack_ops = {
76789 +struct security_operations smack_ops __read_only = {
76790 .name = "smack",
76791
76792 .ptrace_access_check = smack_ptrace_access_check,
76793 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
76794 index 4b327b6..646c57a 100644
76795 --- a/security/tomoyo/tomoyo.c
76796 +++ b/security/tomoyo/tomoyo.c
76797 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
76798 * tomoyo_security_ops is a "struct security_operations" which is used for
76799 * registering TOMOYO.
76800 */
76801 -static struct security_operations tomoyo_security_ops = {
76802 +static struct security_operations tomoyo_security_ops __read_only = {
76803 .name = "tomoyo",
76804 .cred_alloc_blank = tomoyo_cred_alloc_blank,
76805 .cred_prepare = tomoyo_cred_prepare,
76806 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
76807 index 762af68..7103453 100644
76808 --- a/sound/aoa/codecs/onyx.c
76809 +++ b/sound/aoa/codecs/onyx.c
76810 @@ -54,7 +54,7 @@ struct onyx {
76811 spdif_locked:1,
76812 analog_locked:1,
76813 original_mute:2;
76814 - int open_count;
76815 + local_t open_count;
76816 struct codec_info *codec_info;
76817
76818 /* mutex serializes concurrent access to the device
76819 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
76820 struct onyx *onyx = cii->codec_data;
76821
76822 mutex_lock(&onyx->mutex);
76823 - onyx->open_count++;
76824 + local_inc(&onyx->open_count);
76825 mutex_unlock(&onyx->mutex);
76826
76827 return 0;
76828 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
76829 struct onyx *onyx = cii->codec_data;
76830
76831 mutex_lock(&onyx->mutex);
76832 - onyx->open_count--;
76833 - if (!onyx->open_count)
76834 + if (local_dec_and_test(&onyx->open_count))
76835 onyx->spdif_locked = onyx->analog_locked = 0;
76836 mutex_unlock(&onyx->mutex);
76837
76838 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
76839 index ffd2025..df062c9 100644
76840 --- a/sound/aoa/codecs/onyx.h
76841 +++ b/sound/aoa/codecs/onyx.h
76842 @@ -11,6 +11,7 @@
76843 #include <linux/i2c.h>
76844 #include <asm/pmac_low_i2c.h>
76845 #include <asm/prom.h>
76846 +#include <asm/local.h>
76847
76848 /* PCM3052 register definitions */
76849
76850 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
76851 index 3cc4b86..af0a951 100644
76852 --- a/sound/core/oss/pcm_oss.c
76853 +++ b/sound/core/oss/pcm_oss.c
76854 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
76855 if (in_kernel) {
76856 mm_segment_t fs;
76857 fs = snd_enter_user();
76858 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76859 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76860 snd_leave_user(fs);
76861 } else {
76862 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76863 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76864 }
76865 if (ret != -EPIPE && ret != -ESTRPIPE)
76866 break;
76867 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
76868 if (in_kernel) {
76869 mm_segment_t fs;
76870 fs = snd_enter_user();
76871 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76872 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76873 snd_leave_user(fs);
76874 } else {
76875 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76876 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76877 }
76878 if (ret == -EPIPE) {
76879 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
76880 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
76881 struct snd_pcm_plugin_channel *channels;
76882 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
76883 if (!in_kernel) {
76884 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
76885 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
76886 return -EFAULT;
76887 buf = runtime->oss.buffer;
76888 }
76889 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
76890 }
76891 } else {
76892 tmp = snd_pcm_oss_write2(substream,
76893 - (const char __force *)buf,
76894 + (const char __force_kernel *)buf,
76895 runtime->oss.period_bytes, 0);
76896 if (tmp <= 0)
76897 goto err;
76898 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
76899 struct snd_pcm_runtime *runtime = substream->runtime;
76900 snd_pcm_sframes_t frames, frames1;
76901 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
76902 - char __user *final_dst = (char __force __user *)buf;
76903 + char __user *final_dst = (char __force_user *)buf;
76904 if (runtime->oss.plugin_first) {
76905 struct snd_pcm_plugin_channel *channels;
76906 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
76907 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
76908 xfer += tmp;
76909 runtime->oss.buffer_used -= tmp;
76910 } else {
76911 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
76912 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
76913 runtime->oss.period_bytes, 0);
76914 if (tmp <= 0)
76915 goto err;
76916 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
76917 size1);
76918 size1 /= runtime->channels; /* frames */
76919 fs = snd_enter_user();
76920 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
76921 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
76922 snd_leave_user(fs);
76923 }
76924 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
76925 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
76926 index 91cdf94..4085161 100644
76927 --- a/sound/core/pcm_compat.c
76928 +++ b/sound/core/pcm_compat.c
76929 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
76930 int err;
76931
76932 fs = snd_enter_user();
76933 - err = snd_pcm_delay(substream, &delay);
76934 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
76935 snd_leave_user(fs);
76936 if (err < 0)
76937 return err;
76938 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
76939 index 25ed9fe..24c46e9 100644
76940 --- a/sound/core/pcm_native.c
76941 +++ b/sound/core/pcm_native.c
76942 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
76943 switch (substream->stream) {
76944 case SNDRV_PCM_STREAM_PLAYBACK:
76945 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
76946 - (void __user *)arg);
76947 + (void __force_user *)arg);
76948 break;
76949 case SNDRV_PCM_STREAM_CAPTURE:
76950 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
76951 - (void __user *)arg);
76952 + (void __force_user *)arg);
76953 break;
76954 default:
76955 result = -EINVAL;
76956 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
76957 index 5cf8d65..912a79c 100644
76958 --- a/sound/core/seq/seq_device.c
76959 +++ b/sound/core/seq/seq_device.c
76960 @@ -64,7 +64,7 @@ struct ops_list {
76961 int argsize; /* argument size */
76962
76963 /* operators */
76964 - struct snd_seq_dev_ops ops;
76965 + struct snd_seq_dev_ops *ops;
76966
76967 /* registred devices */
76968 struct list_head dev_list; /* list of devices */
76969 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
76970
76971 mutex_lock(&ops->reg_mutex);
76972 /* copy driver operators */
76973 - ops->ops = *entry;
76974 + ops->ops = entry;
76975 ops->driver |= DRIVER_LOADED;
76976 ops->argsize = argsize;
76977
76978 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
76979 dev->name, ops->id, ops->argsize, dev->argsize);
76980 return -EINVAL;
76981 }
76982 - if (ops->ops.init_device(dev) >= 0) {
76983 + if (ops->ops->init_device(dev) >= 0) {
76984 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
76985 ops->num_init_devices++;
76986 } else {
76987 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
76988 dev->name, ops->id, ops->argsize, dev->argsize);
76989 return -EINVAL;
76990 }
76991 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
76992 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
76993 dev->status = SNDRV_SEQ_DEVICE_FREE;
76994 dev->driver_data = NULL;
76995 ops->num_init_devices--;
76996 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
76997 index f24bf9a..1f7b67c 100644
76998 --- a/sound/drivers/mts64.c
76999 +++ b/sound/drivers/mts64.c
77000 @@ -29,6 +29,7 @@
77001 #include <sound/initval.h>
77002 #include <sound/rawmidi.h>
77003 #include <sound/control.h>
77004 +#include <asm/local.h>
77005
77006 #define CARD_NAME "Miditerminal 4140"
77007 #define DRIVER_NAME "MTS64"
77008 @@ -67,7 +68,7 @@ struct mts64 {
77009 struct pardevice *pardev;
77010 int pardev_claimed;
77011
77012 - int open_count;
77013 + local_t open_count;
77014 int current_midi_output_port;
77015 int current_midi_input_port;
77016 u8 mode[MTS64_NUM_INPUT_PORTS];
77017 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77018 {
77019 struct mts64 *mts = substream->rmidi->private_data;
77020
77021 - if (mts->open_count == 0) {
77022 + if (local_read(&mts->open_count) == 0) {
77023 /* We don't need a spinlock here, because this is just called
77024 if the device has not been opened before.
77025 So there aren't any IRQs from the device */
77026 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77027
77028 msleep(50);
77029 }
77030 - ++(mts->open_count);
77031 + local_inc(&mts->open_count);
77032
77033 return 0;
77034 }
77035 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77036 struct mts64 *mts = substream->rmidi->private_data;
77037 unsigned long flags;
77038
77039 - --(mts->open_count);
77040 - if (mts->open_count == 0) {
77041 + if (local_dec_return(&mts->open_count) == 0) {
77042 /* We need the spinlock_irqsave here because we can still
77043 have IRQs at this point */
77044 spin_lock_irqsave(&mts->lock, flags);
77045 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77046
77047 msleep(500);
77048
77049 - } else if (mts->open_count < 0)
77050 - mts->open_count = 0;
77051 + } else if (local_read(&mts->open_count) < 0)
77052 + local_set(&mts->open_count, 0);
77053
77054 return 0;
77055 }
77056 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77057 index b953fb4..1999c01 100644
77058 --- a/sound/drivers/opl4/opl4_lib.c
77059 +++ b/sound/drivers/opl4/opl4_lib.c
77060 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77061 MODULE_DESCRIPTION("OPL4 driver");
77062 MODULE_LICENSE("GPL");
77063
77064 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77065 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77066 {
77067 int timeout = 10;
77068 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77069 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77070 index f664823..590c745 100644
77071 --- a/sound/drivers/portman2x4.c
77072 +++ b/sound/drivers/portman2x4.c
77073 @@ -48,6 +48,7 @@
77074 #include <sound/initval.h>
77075 #include <sound/rawmidi.h>
77076 #include <sound/control.h>
77077 +#include <asm/local.h>
77078
77079 #define CARD_NAME "Portman 2x4"
77080 #define DRIVER_NAME "portman"
77081 @@ -85,7 +86,7 @@ struct portman {
77082 struct pardevice *pardev;
77083 int pardev_claimed;
77084
77085 - int open_count;
77086 + local_t open_count;
77087 int mode[PORTMAN_NUM_INPUT_PORTS];
77088 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77089 };
77090 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77091 index 87657dd..a8268d4 100644
77092 --- a/sound/firewire/amdtp.c
77093 +++ b/sound/firewire/amdtp.c
77094 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77095 ptr = s->pcm_buffer_pointer + data_blocks;
77096 if (ptr >= pcm->runtime->buffer_size)
77097 ptr -= pcm->runtime->buffer_size;
77098 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77099 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77100
77101 s->pcm_period_pointer += data_blocks;
77102 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77103 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77104 */
77105 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77106 {
77107 - ACCESS_ONCE(s->source_node_id_field) =
77108 + ACCESS_ONCE_RW(s->source_node_id_field) =
77109 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77110 }
77111 EXPORT_SYMBOL(amdtp_out_stream_update);
77112 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77113 index 537a9cb..8e8c8e9 100644
77114 --- a/sound/firewire/amdtp.h
77115 +++ b/sound/firewire/amdtp.h
77116 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77117 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77118 struct snd_pcm_substream *pcm)
77119 {
77120 - ACCESS_ONCE(s->pcm) = pcm;
77121 + ACCESS_ONCE_RW(s->pcm) = pcm;
77122 }
77123
77124 /**
77125 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77126 index cd094ec..eca1277 100644
77127 --- a/sound/firewire/isight.c
77128 +++ b/sound/firewire/isight.c
77129 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77130 ptr += count;
77131 if (ptr >= runtime->buffer_size)
77132 ptr -= runtime->buffer_size;
77133 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77134 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77135
77136 isight->period_counter += count;
77137 if (isight->period_counter >= runtime->period_size) {
77138 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77139 if (err < 0)
77140 return err;
77141
77142 - ACCESS_ONCE(isight->pcm_active) = true;
77143 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77144
77145 return 0;
77146 }
77147 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77148 {
77149 struct isight *isight = substream->private_data;
77150
77151 - ACCESS_ONCE(isight->pcm_active) = false;
77152 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77153
77154 mutex_lock(&isight->mutex);
77155 isight_stop_streaming(isight);
77156 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77157
77158 switch (cmd) {
77159 case SNDRV_PCM_TRIGGER_START:
77160 - ACCESS_ONCE(isight->pcm_running) = true;
77161 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77162 break;
77163 case SNDRV_PCM_TRIGGER_STOP:
77164 - ACCESS_ONCE(isight->pcm_running) = false;
77165 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77166 break;
77167 default:
77168 return -EINVAL;
77169 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77170 index c94578d..0794ac1 100644
77171 --- a/sound/isa/cmi8330.c
77172 +++ b/sound/isa/cmi8330.c
77173 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77174
77175 struct snd_pcm *pcm;
77176 struct snd_cmi8330_stream {
77177 - struct snd_pcm_ops ops;
77178 + snd_pcm_ops_no_const ops;
77179 snd_pcm_open_callback_t open;
77180 void *private_data; /* sb or wss */
77181 } streams[2];
77182 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77183 index 733b014..56ce96f 100644
77184 --- a/sound/oss/sb_audio.c
77185 +++ b/sound/oss/sb_audio.c
77186 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77187 buf16 = (signed short *)(localbuf + localoffs);
77188 while (c)
77189 {
77190 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77191 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77192 if (copy_from_user(lbuf8,
77193 userbuf+useroffs + p,
77194 locallen))
77195 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77196 index 09d4648..cf234c7 100644
77197 --- a/sound/oss/swarm_cs4297a.c
77198 +++ b/sound/oss/swarm_cs4297a.c
77199 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77200 {
77201 struct cs4297a_state *s;
77202 u32 pwr, id;
77203 - mm_segment_t fs;
77204 int rval;
77205 #ifndef CONFIG_BCM_CS4297A_CSWARM
77206 u64 cfg;
77207 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77208 if (!rval) {
77209 char *sb1250_duart_present;
77210
77211 +#if 0
77212 + mm_segment_t fs;
77213 fs = get_fs();
77214 set_fs(KERNEL_DS);
77215 -#if 0
77216 val = SOUND_MASK_LINE;
77217 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77218 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77219 val = initvol[i].vol;
77220 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77221 }
77222 + set_fs(fs);
77223 // cs4297a_write_ac97(s, 0x18, 0x0808);
77224 #else
77225 // cs4297a_write_ac97(s, 0x5e, 0x180);
77226 cs4297a_write_ac97(s, 0x02, 0x0808);
77227 cs4297a_write_ac97(s, 0x18, 0x0808);
77228 #endif
77229 - set_fs(fs);
77230
77231 list_add(&s->list, &cs4297a_devs);
77232
77233 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77234 index 5644711..a2aebc1 100644
77235 --- a/sound/pci/hda/hda_codec.h
77236 +++ b/sound/pci/hda/hda_codec.h
77237 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77238 /* notify power-up/down from codec to controller */
77239 void (*pm_notify)(struct hda_bus *bus);
77240 #endif
77241 -};
77242 +} __no_const;
77243
77244 /* template to pass to the bus constructor */
77245 struct hda_bus_template {
77246 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77247 #endif
77248 void (*reboot_notify)(struct hda_codec *codec);
77249 };
77250 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77251
77252 /* record for amp information cache */
77253 struct hda_cache_head {
77254 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77255 struct snd_pcm_substream *substream);
77256 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77257 struct snd_pcm_substream *substream);
77258 -};
77259 +} __no_const;
77260
77261 /* PCM information for each substream */
77262 struct hda_pcm_stream {
77263 @@ -801,7 +802,7 @@ struct hda_codec {
77264 const char *modelname; /* model name for preset */
77265
77266 /* set by patch */
77267 - struct hda_codec_ops patch_ops;
77268 + hda_codec_ops_no_const patch_ops;
77269
77270 /* PCM to create, set by patch_ops.build_pcms callback */
77271 unsigned int num_pcms;
77272 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77273 index 0da778a..bc38b84 100644
77274 --- a/sound/pci/ice1712/ice1712.h
77275 +++ b/sound/pci/ice1712/ice1712.h
77276 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77277 unsigned int mask_flags; /* total mask bits */
77278 struct snd_akm4xxx_ops {
77279 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77280 - } ops;
77281 + } __no_const ops;
77282 };
77283
77284 struct snd_ice1712_spdif {
77285 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77286 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77287 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77288 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77289 - } ops;
77290 + } __no_const ops;
77291 };
77292
77293
77294 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77295 index 03ee4e3..be86b46 100644
77296 --- a/sound/pci/ymfpci/ymfpci_main.c
77297 +++ b/sound/pci/ymfpci/ymfpci_main.c
77298 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77299 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77300 break;
77301 }
77302 - if (atomic_read(&chip->interrupt_sleep_count)) {
77303 - atomic_set(&chip->interrupt_sleep_count, 0);
77304 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77305 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77306 wake_up(&chip->interrupt_sleep);
77307 }
77308 __end:
77309 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77310 continue;
77311 init_waitqueue_entry(&wait, current);
77312 add_wait_queue(&chip->interrupt_sleep, &wait);
77313 - atomic_inc(&chip->interrupt_sleep_count);
77314 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77315 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77316 remove_wait_queue(&chip->interrupt_sleep, &wait);
77317 }
77318 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77319 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77320 spin_unlock(&chip->reg_lock);
77321
77322 - if (atomic_read(&chip->interrupt_sleep_count)) {
77323 - atomic_set(&chip->interrupt_sleep_count, 0);
77324 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77325 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77326 wake_up(&chip->interrupt_sleep);
77327 }
77328 }
77329 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77330 spin_lock_init(&chip->reg_lock);
77331 spin_lock_init(&chip->voice_lock);
77332 init_waitqueue_head(&chip->interrupt_sleep);
77333 - atomic_set(&chip->interrupt_sleep_count, 0);
77334 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77335 chip->card = card;
77336 chip->pci = pci;
77337 chip->irq = -1;
77338 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77339 index ee15337..e2187a6 100644
77340 --- a/sound/soc/soc-pcm.c
77341 +++ b/sound/soc/soc-pcm.c
77342 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77343 }
77344
77345 /* ASoC PCM operations */
77346 -static struct snd_pcm_ops soc_pcm_ops = {
77347 +static snd_pcm_ops_no_const soc_pcm_ops = {
77348 .open = soc_pcm_open,
77349 .close = soc_pcm_close,
77350 .hw_params = soc_pcm_hw_params,
77351 diff --git a/sound/usb/card.h b/sound/usb/card.h
77352 index a39edcc..1014050 100644
77353 --- a/sound/usb/card.h
77354 +++ b/sound/usb/card.h
77355 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77356 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77357 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77358 };
77359 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77360
77361 struct snd_usb_substream {
77362 struct snd_usb_stream *stream;
77363 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77364 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77365 spinlock_t lock;
77366
77367 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77368 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77369 int last_frame_number; /* stored frame number */
77370 int last_delay; /* stored delay */
77371 };
77372 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77373 new file mode 100644
77374 index 0000000..b044b80
77375 --- /dev/null
77376 +++ b/tools/gcc/Makefile
77377 @@ -0,0 +1,21 @@
77378 +#CC := gcc
77379 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77380 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77381 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77382 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77383 +
77384 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77385 +
77386 +hostlibs-y := constify_plugin.so
77387 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77388 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77389 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77390 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77391 +
77392 +always := $(hostlibs-y)
77393 +
77394 +constify_plugin-objs := constify_plugin.o
77395 +stackleak_plugin-objs := stackleak_plugin.o
77396 +kallocstat_plugin-objs := kallocstat_plugin.o
77397 +kernexec_plugin-objs := kernexec_plugin.o
77398 +checker_plugin-objs := checker_plugin.o
77399 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77400 new file mode 100644
77401 index 0000000..d41b5af
77402 --- /dev/null
77403 +++ b/tools/gcc/checker_plugin.c
77404 @@ -0,0 +1,171 @@
77405 +/*
77406 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77407 + * Licensed under the GPL v2
77408 + *
77409 + * Note: the choice of the license means that the compilation process is
77410 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77411 + * but for the kernel it doesn't matter since it doesn't link against
77412 + * any of the gcc libraries
77413 + *
77414 + * gcc plugin to implement various sparse (source code checker) features
77415 + *
77416 + * TODO:
77417 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77418 + *
77419 + * BUGS:
77420 + * - none known
77421 + */
77422 +#include "gcc-plugin.h"
77423 +#include "config.h"
77424 +#include "system.h"
77425 +#include "coretypes.h"
77426 +#include "tree.h"
77427 +#include "tree-pass.h"
77428 +#include "flags.h"
77429 +#include "intl.h"
77430 +#include "toplev.h"
77431 +#include "plugin.h"
77432 +//#include "expr.h" where are you...
77433 +#include "diagnostic.h"
77434 +#include "plugin-version.h"
77435 +#include "tm.h"
77436 +#include "function.h"
77437 +#include "basic-block.h"
77438 +#include "gimple.h"
77439 +#include "rtl.h"
77440 +#include "emit-rtl.h"
77441 +#include "tree-flow.h"
77442 +#include "target.h"
77443 +
77444 +extern void c_register_addr_space (const char *str, addr_space_t as);
77445 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77446 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77447 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77448 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77449 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77450 +
77451 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77452 +extern rtx emit_move_insn(rtx x, rtx y);
77453 +
77454 +int plugin_is_GPL_compatible;
77455 +
77456 +static struct plugin_info checker_plugin_info = {
77457 + .version = "201111150100",
77458 +};
77459 +
77460 +#define ADDR_SPACE_KERNEL 0
77461 +#define ADDR_SPACE_FORCE_KERNEL 1
77462 +#define ADDR_SPACE_USER 2
77463 +#define ADDR_SPACE_FORCE_USER 3
77464 +#define ADDR_SPACE_IOMEM 0
77465 +#define ADDR_SPACE_FORCE_IOMEM 0
77466 +#define ADDR_SPACE_PERCPU 0
77467 +#define ADDR_SPACE_FORCE_PERCPU 0
77468 +#define ADDR_SPACE_RCU 0
77469 +#define ADDR_SPACE_FORCE_RCU 0
77470 +
77471 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77472 +{
77473 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77474 +}
77475 +
77476 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77477 +{
77478 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77479 +}
77480 +
77481 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77482 +{
77483 + return default_addr_space_valid_pointer_mode(mode, as);
77484 +}
77485 +
77486 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77487 +{
77488 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77489 +}
77490 +
77491 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77492 +{
77493 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77494 +}
77495 +
77496 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77497 +{
77498 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77499 + return true;
77500 +
77501 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77502 + return true;
77503 +
77504 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77505 + return true;
77506 +
77507 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77508 + return true;
77509 +
77510 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77511 + return true;
77512 +
77513 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77514 + return true;
77515 +
77516 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77517 + return true;
77518 +
77519 + return subset == superset;
77520 +}
77521 +
77522 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77523 +{
77524 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77525 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77526 +
77527 + return op;
77528 +}
77529 +
77530 +static void register_checker_address_spaces(void *event_data, void *data)
77531 +{
77532 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77533 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77534 + c_register_addr_space("__user", ADDR_SPACE_USER);
77535 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77536 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77537 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77538 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77539 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77540 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77541 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77542 +
77543 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77544 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77545 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77546 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77547 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77548 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77549 + targetm.addr_space.convert = checker_addr_space_convert;
77550 +}
77551 +
77552 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77553 +{
77554 + const char * const plugin_name = plugin_info->base_name;
77555 + const int argc = plugin_info->argc;
77556 + const struct plugin_argument * const argv = plugin_info->argv;
77557 + int i;
77558 +
77559 + if (!plugin_default_version_check(version, &gcc_version)) {
77560 + error(G_("incompatible gcc/plugin versions"));
77561 + return 1;
77562 + }
77563 +
77564 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77565 +
77566 + for (i = 0; i < argc; ++i)
77567 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77568 +
77569 + if (TARGET_64BIT == 0)
77570 + return 0;
77571 +
77572 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
77573 +
77574 + return 0;
77575 +}
77576 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
77577 new file mode 100644
77578 index 0000000..704a564
77579 --- /dev/null
77580 +++ b/tools/gcc/constify_plugin.c
77581 @@ -0,0 +1,303 @@
77582 +/*
77583 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
77584 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
77585 + * Licensed under the GPL v2, or (at your option) v3
77586 + *
77587 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
77588 + *
77589 + * Homepage:
77590 + * http://www.grsecurity.net/~ephox/const_plugin/
77591 + *
77592 + * Usage:
77593 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
77594 + * $ gcc -fplugin=constify_plugin.so test.c -O2
77595 + */
77596 +
77597 +#include "gcc-plugin.h"
77598 +#include "config.h"
77599 +#include "system.h"
77600 +#include "coretypes.h"
77601 +#include "tree.h"
77602 +#include "tree-pass.h"
77603 +#include "flags.h"
77604 +#include "intl.h"
77605 +#include "toplev.h"
77606 +#include "plugin.h"
77607 +#include "diagnostic.h"
77608 +#include "plugin-version.h"
77609 +#include "tm.h"
77610 +#include "function.h"
77611 +#include "basic-block.h"
77612 +#include "gimple.h"
77613 +#include "rtl.h"
77614 +#include "emit-rtl.h"
77615 +#include "tree-flow.h"
77616 +
77617 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
77618 +
77619 +int plugin_is_GPL_compatible;
77620 +
77621 +static struct plugin_info const_plugin_info = {
77622 + .version = "201111150100",
77623 + .help = "no-constify\tturn off constification\n",
77624 +};
77625 +
77626 +static void constify_type(tree type);
77627 +static bool walk_struct(tree node);
77628 +
77629 +static tree deconstify_type(tree old_type)
77630 +{
77631 + tree new_type, field;
77632 +
77633 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
77634 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
77635 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
77636 + DECL_FIELD_CONTEXT(field) = new_type;
77637 + TYPE_READONLY(new_type) = 0;
77638 + C_TYPE_FIELDS_READONLY(new_type) = 0;
77639 + return new_type;
77640 +}
77641 +
77642 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77643 +{
77644 + tree type;
77645 +
77646 + *no_add_attrs = true;
77647 + if (TREE_CODE(*node) == FUNCTION_DECL) {
77648 + error("%qE attribute does not apply to functions", name);
77649 + return NULL_TREE;
77650 + }
77651 +
77652 + if (TREE_CODE(*node) == VAR_DECL) {
77653 + error("%qE attribute does not apply to variables", name);
77654 + return NULL_TREE;
77655 + }
77656 +
77657 + if (TYPE_P(*node)) {
77658 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
77659 + *no_add_attrs = false;
77660 + else
77661 + error("%qE attribute applies to struct and union types only", name);
77662 + return NULL_TREE;
77663 + }
77664 +
77665 + type = TREE_TYPE(*node);
77666 +
77667 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
77668 + error("%qE attribute applies to struct and union types only", name);
77669 + return NULL_TREE;
77670 + }
77671 +
77672 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
77673 + error("%qE attribute is already applied to the type", name);
77674 + return NULL_TREE;
77675 + }
77676 +
77677 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
77678 + error("%qE attribute used on type that is not constified", name);
77679 + return NULL_TREE;
77680 + }
77681 +
77682 + if (TREE_CODE(*node) == TYPE_DECL) {
77683 + TREE_TYPE(*node) = deconstify_type(type);
77684 + TREE_READONLY(*node) = 0;
77685 + return NULL_TREE;
77686 + }
77687 +
77688 + return NULL_TREE;
77689 +}
77690 +
77691 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77692 +{
77693 + *no_add_attrs = true;
77694 + if (!TYPE_P(*node)) {
77695 + error("%qE attribute applies to types only", name);
77696 + return NULL_TREE;
77697 + }
77698 +
77699 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
77700 + error("%qE attribute applies to struct and union types only", name);
77701 + return NULL_TREE;
77702 + }
77703 +
77704 + *no_add_attrs = false;
77705 + constify_type(*node);
77706 + return NULL_TREE;
77707 +}
77708 +
77709 +static struct attribute_spec no_const_attr = {
77710 + .name = "no_const",
77711 + .min_length = 0,
77712 + .max_length = 0,
77713 + .decl_required = false,
77714 + .type_required = false,
77715 + .function_type_required = false,
77716 + .handler = handle_no_const_attribute,
77717 +#if BUILDING_GCC_VERSION >= 4007
77718 + .affects_type_identity = true
77719 +#endif
77720 +};
77721 +
77722 +static struct attribute_spec do_const_attr = {
77723 + .name = "do_const",
77724 + .min_length = 0,
77725 + .max_length = 0,
77726 + .decl_required = false,
77727 + .type_required = false,
77728 + .function_type_required = false,
77729 + .handler = handle_do_const_attribute,
77730 +#if BUILDING_GCC_VERSION >= 4007
77731 + .affects_type_identity = true
77732 +#endif
77733 +};
77734 +
77735 +static void register_attributes(void *event_data, void *data)
77736 +{
77737 + register_attribute(&no_const_attr);
77738 + register_attribute(&do_const_attr);
77739 +}
77740 +
77741 +static void constify_type(tree type)
77742 +{
77743 + TYPE_READONLY(type) = 1;
77744 + C_TYPE_FIELDS_READONLY(type) = 1;
77745 +}
77746 +
77747 +static bool is_fptr(tree field)
77748 +{
77749 + tree ptr = TREE_TYPE(field);
77750 +
77751 + if (TREE_CODE(ptr) != POINTER_TYPE)
77752 + return false;
77753 +
77754 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77755 +}
77756 +
77757 +static bool walk_struct(tree node)
77758 +{
77759 + tree field;
77760 +
77761 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77762 + return false;
77763 +
77764 + if (TYPE_FIELDS(node) == NULL_TREE)
77765 + return false;
77766 +
77767 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77768 + tree type = TREE_TYPE(field);
77769 + enum tree_code code = TREE_CODE(type);
77770 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77771 + if (!(walk_struct(type)))
77772 + return false;
77773 + } else if (!is_fptr(field) && !TREE_READONLY(field))
77774 + return false;
77775 + }
77776 + return true;
77777 +}
77778 +
77779 +static void finish_type(void *event_data, void *data)
77780 +{
77781 + tree type = (tree)event_data;
77782 +
77783 + if (type == NULL_TREE)
77784 + return;
77785 +
77786 + if (TYPE_READONLY(type))
77787 + return;
77788 +
77789 + if (walk_struct(type))
77790 + constify_type(type);
77791 +}
77792 +
77793 +static unsigned int check_local_variables(void);
77794 +
77795 +struct gimple_opt_pass pass_local_variable = {
77796 + {
77797 + .type = GIMPLE_PASS,
77798 + .name = "check_local_variables",
77799 + .gate = NULL,
77800 + .execute = check_local_variables,
77801 + .sub = NULL,
77802 + .next = NULL,
77803 + .static_pass_number = 0,
77804 + .tv_id = TV_NONE,
77805 + .properties_required = 0,
77806 + .properties_provided = 0,
77807 + .properties_destroyed = 0,
77808 + .todo_flags_start = 0,
77809 + .todo_flags_finish = 0
77810 + }
77811 +};
77812 +
77813 +static unsigned int check_local_variables(void)
77814 +{
77815 + tree var;
77816 + referenced_var_iterator rvi;
77817 +
77818 +#if BUILDING_GCC_VERSION == 4005
77819 + FOR_EACH_REFERENCED_VAR(var, rvi) {
77820 +#else
77821 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
77822 +#endif
77823 + tree type = TREE_TYPE(var);
77824 +
77825 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
77826 + continue;
77827 +
77828 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
77829 + continue;
77830 +
77831 + if (!TYPE_READONLY(type))
77832 + continue;
77833 +
77834 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
77835 +// continue;
77836 +
77837 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
77838 +// continue;
77839 +
77840 + if (walk_struct(type)) {
77841 + error("constified variable %qE cannot be local", var);
77842 + return 1;
77843 + }
77844 + }
77845 + return 0;
77846 +}
77847 +
77848 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77849 +{
77850 + const char * const plugin_name = plugin_info->base_name;
77851 + const int argc = plugin_info->argc;
77852 + const struct plugin_argument * const argv = plugin_info->argv;
77853 + int i;
77854 + bool constify = true;
77855 +
77856 + struct register_pass_info local_variable_pass_info = {
77857 + .pass = &pass_local_variable.pass,
77858 + .reference_pass_name = "*referenced_vars",
77859 + .ref_pass_instance_number = 0,
77860 + .pos_op = PASS_POS_INSERT_AFTER
77861 + };
77862 +
77863 + if (!plugin_default_version_check(version, &gcc_version)) {
77864 + error(G_("incompatible gcc/plugin versions"));
77865 + return 1;
77866 + }
77867 +
77868 + for (i = 0; i < argc; ++i) {
77869 + if (!(strcmp(argv[i].key, "no-constify"))) {
77870 + constify = false;
77871 + continue;
77872 + }
77873 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77874 + }
77875 +
77876 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77877 + if (constify) {
77878 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77879 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
77880 + }
77881 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77882 +
77883 + return 0;
77884 +}
77885 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
77886 new file mode 100644
77887 index 0000000..a5eabce
77888 --- /dev/null
77889 +++ b/tools/gcc/kallocstat_plugin.c
77890 @@ -0,0 +1,167 @@
77891 +/*
77892 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77893 + * Licensed under the GPL v2
77894 + *
77895 + * Note: the choice of the license means that the compilation process is
77896 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77897 + * but for the kernel it doesn't matter since it doesn't link against
77898 + * any of the gcc libraries
77899 + *
77900 + * gcc plugin to find the distribution of k*alloc sizes
77901 + *
77902 + * TODO:
77903 + *
77904 + * BUGS:
77905 + * - none known
77906 + */
77907 +#include "gcc-plugin.h"
77908 +#include "config.h"
77909 +#include "system.h"
77910 +#include "coretypes.h"
77911 +#include "tree.h"
77912 +#include "tree-pass.h"
77913 +#include "flags.h"
77914 +#include "intl.h"
77915 +#include "toplev.h"
77916 +#include "plugin.h"
77917 +//#include "expr.h" where are you...
77918 +#include "diagnostic.h"
77919 +#include "plugin-version.h"
77920 +#include "tm.h"
77921 +#include "function.h"
77922 +#include "basic-block.h"
77923 +#include "gimple.h"
77924 +#include "rtl.h"
77925 +#include "emit-rtl.h"
77926 +
77927 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77928 +
77929 +int plugin_is_GPL_compatible;
77930 +
77931 +static const char * const kalloc_functions[] = {
77932 + "__kmalloc",
77933 + "kmalloc",
77934 + "kmalloc_large",
77935 + "kmalloc_node",
77936 + "kmalloc_order",
77937 + "kmalloc_order_trace",
77938 + "kmalloc_slab",
77939 + "kzalloc",
77940 + "kzalloc_node",
77941 +};
77942 +
77943 +static struct plugin_info kallocstat_plugin_info = {
77944 + .version = "201111150100",
77945 +};
77946 +
77947 +static unsigned int execute_kallocstat(void);
77948 +
77949 +static struct gimple_opt_pass kallocstat_pass = {
77950 + .pass = {
77951 + .type = GIMPLE_PASS,
77952 + .name = "kallocstat",
77953 + .gate = NULL,
77954 + .execute = execute_kallocstat,
77955 + .sub = NULL,
77956 + .next = NULL,
77957 + .static_pass_number = 0,
77958 + .tv_id = TV_NONE,
77959 + .properties_required = 0,
77960 + .properties_provided = 0,
77961 + .properties_destroyed = 0,
77962 + .todo_flags_start = 0,
77963 + .todo_flags_finish = 0
77964 + }
77965 +};
77966 +
77967 +static bool is_kalloc(const char *fnname)
77968 +{
77969 + size_t i;
77970 +
77971 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
77972 + if (!strcmp(fnname, kalloc_functions[i]))
77973 + return true;
77974 + return false;
77975 +}
77976 +
77977 +static unsigned int execute_kallocstat(void)
77978 +{
77979 + basic_block bb;
77980 +
77981 + // 1. loop through BBs and GIMPLE statements
77982 + FOR_EACH_BB(bb) {
77983 + gimple_stmt_iterator gsi;
77984 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77985 + // gimple match:
77986 + tree fndecl, size;
77987 + gimple call_stmt;
77988 + const char *fnname;
77989 +
77990 + // is it a call
77991 + call_stmt = gsi_stmt(gsi);
77992 + if (!is_gimple_call(call_stmt))
77993 + continue;
77994 + fndecl = gimple_call_fndecl(call_stmt);
77995 + if (fndecl == NULL_TREE)
77996 + continue;
77997 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
77998 + continue;
77999 +
78000 + // is it a call to k*alloc
78001 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78002 + if (!is_kalloc(fnname))
78003 + continue;
78004 +
78005 + // is the size arg the result of a simple const assignment
78006 + size = gimple_call_arg(call_stmt, 0);
78007 + while (true) {
78008 + gimple def_stmt;
78009 + expanded_location xloc;
78010 + size_t size_val;
78011 +
78012 + if (TREE_CODE(size) != SSA_NAME)
78013 + break;
78014 + def_stmt = SSA_NAME_DEF_STMT(size);
78015 + if (!def_stmt || !is_gimple_assign(def_stmt))
78016 + break;
78017 + if (gimple_num_ops(def_stmt) != 2)
78018 + break;
78019 + size = gimple_assign_rhs1(def_stmt);
78020 + if (!TREE_CONSTANT(size))
78021 + continue;
78022 + xloc = expand_location(gimple_location(def_stmt));
78023 + if (!xloc.file)
78024 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78025 + size_val = TREE_INT_CST_LOW(size);
78026 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78027 + break;
78028 + }
78029 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78030 +//debug_tree(gimple_call_fn(call_stmt));
78031 +//print_node(stderr, "pax", fndecl, 4);
78032 + }
78033 + }
78034 +
78035 + return 0;
78036 +}
78037 +
78038 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78039 +{
78040 + const char * const plugin_name = plugin_info->base_name;
78041 + struct register_pass_info kallocstat_pass_info = {
78042 + .pass = &kallocstat_pass.pass,
78043 + .reference_pass_name = "ssa",
78044 + .ref_pass_instance_number = 0,
78045 + .pos_op = PASS_POS_INSERT_AFTER
78046 + };
78047 +
78048 + if (!plugin_default_version_check(version, &gcc_version)) {
78049 + error(G_("incompatible gcc/plugin versions"));
78050 + return 1;
78051 + }
78052 +
78053 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78054 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78055 +
78056 + return 0;
78057 +}
78058 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78059 new file mode 100644
78060 index 0000000..51f747e
78061 --- /dev/null
78062 +++ b/tools/gcc/kernexec_plugin.c
78063 @@ -0,0 +1,348 @@
78064 +/*
78065 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78066 + * Licensed under the GPL v2
78067 + *
78068 + * Note: the choice of the license means that the compilation process is
78069 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78070 + * but for the kernel it doesn't matter since it doesn't link against
78071 + * any of the gcc libraries
78072 + *
78073 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78074 + *
78075 + * TODO:
78076 + *
78077 + * BUGS:
78078 + * - none known
78079 + */
78080 +#include "gcc-plugin.h"
78081 +#include "config.h"
78082 +#include "system.h"
78083 +#include "coretypes.h"
78084 +#include "tree.h"
78085 +#include "tree-pass.h"
78086 +#include "flags.h"
78087 +#include "intl.h"
78088 +#include "toplev.h"
78089 +#include "plugin.h"
78090 +//#include "expr.h" where are you...
78091 +#include "diagnostic.h"
78092 +#include "plugin-version.h"
78093 +#include "tm.h"
78094 +#include "function.h"
78095 +#include "basic-block.h"
78096 +#include "gimple.h"
78097 +#include "rtl.h"
78098 +#include "emit-rtl.h"
78099 +#include "tree-flow.h"
78100 +
78101 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78102 +extern rtx emit_move_insn(rtx x, rtx y);
78103 +
78104 +int plugin_is_GPL_compatible;
78105 +
78106 +static struct plugin_info kernexec_plugin_info = {
78107 + .version = "201111291120",
78108 + .help = "method=[bts|or]\tinstrumentation method\n"
78109 +};
78110 +
78111 +static unsigned int execute_kernexec_fptr(void);
78112 +static unsigned int execute_kernexec_retaddr(void);
78113 +static bool kernexec_cmodel_check(void);
78114 +
78115 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
78116 +static void (*kernexec_instrument_retaddr)(rtx);
78117 +
78118 +static struct gimple_opt_pass kernexec_fptr_pass = {
78119 + .pass = {
78120 + .type = GIMPLE_PASS,
78121 + .name = "kernexec_fptr",
78122 + .gate = kernexec_cmodel_check,
78123 + .execute = execute_kernexec_fptr,
78124 + .sub = NULL,
78125 + .next = NULL,
78126 + .static_pass_number = 0,
78127 + .tv_id = TV_NONE,
78128 + .properties_required = 0,
78129 + .properties_provided = 0,
78130 + .properties_destroyed = 0,
78131 + .todo_flags_start = 0,
78132 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78133 + }
78134 +};
78135 +
78136 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78137 + .pass = {
78138 + .type = RTL_PASS,
78139 + .name = "kernexec_retaddr",
78140 + .gate = kernexec_cmodel_check,
78141 + .execute = execute_kernexec_retaddr,
78142 + .sub = NULL,
78143 + .next = NULL,
78144 + .static_pass_number = 0,
78145 + .tv_id = TV_NONE,
78146 + .properties_required = 0,
78147 + .properties_provided = 0,
78148 + .properties_destroyed = 0,
78149 + .todo_flags_start = 0,
78150 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78151 + }
78152 +};
78153 +
78154 +static bool kernexec_cmodel_check(void)
78155 +{
78156 + tree section;
78157 +
78158 + if (ix86_cmodel != CM_KERNEL)
78159 + return false;
78160 +
78161 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78162 + if (!section || !TREE_VALUE(section))
78163 + return true;
78164 +
78165 + section = TREE_VALUE(TREE_VALUE(section));
78166 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78167 + return true;
78168 +
78169 + return false;
78170 +}
78171 +
78172 +/*
78173 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78174 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78175 + */
78176 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
78177 +{
78178 + gimple assign_intptr, assign_new_fptr, call_stmt;
78179 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78180 +
78181 + call_stmt = gsi_stmt(gsi);
78182 + old_fptr = gimple_call_fn(call_stmt);
78183 +
78184 + // create temporary unsigned long variable used for bitops and cast fptr to it
78185 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78186 + add_referenced_var(intptr);
78187 + mark_sym_for_renaming(intptr);
78188 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78189 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
78190 + update_stmt(assign_intptr);
78191 +
78192 + // apply logical or to temporary unsigned long and bitmask
78193 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78194 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78195 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78196 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
78197 + update_stmt(assign_intptr);
78198 +
78199 + // cast temporary unsigned long back to a temporary fptr variable
78200 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78201 + add_referenced_var(new_fptr);
78202 + mark_sym_for_renaming(new_fptr);
78203 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78204 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
78205 + update_stmt(assign_new_fptr);
78206 +
78207 + // replace call stmt fn with the new fptr
78208 + gimple_call_set_fn(call_stmt, new_fptr);
78209 + update_stmt(call_stmt);
78210 +}
78211 +
78212 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
78213 +{
78214 + gimple asm_or_stmt, call_stmt;
78215 + tree old_fptr, new_fptr, input, output;
78216 + VEC(tree, gc) *inputs = NULL;
78217 + VEC(tree, gc) *outputs = NULL;
78218 +
78219 + call_stmt = gsi_stmt(gsi);
78220 + old_fptr = gimple_call_fn(call_stmt);
78221 +
78222 + // create temporary fptr variable
78223 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78224 + add_referenced_var(new_fptr);
78225 + mark_sym_for_renaming(new_fptr);
78226 +
78227 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78228 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78229 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78230 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78231 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78232 + VEC_safe_push(tree, gc, inputs, input);
78233 + VEC_safe_push(tree, gc, outputs, output);
78234 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78235 + gimple_asm_set_volatile(asm_or_stmt, true);
78236 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
78237 + update_stmt(asm_or_stmt);
78238 +
78239 + // replace call stmt fn with the new fptr
78240 + gimple_call_set_fn(call_stmt, new_fptr);
78241 + update_stmt(call_stmt);
78242 +}
78243 +
78244 +/*
78245 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78246 + */
78247 +static unsigned int execute_kernexec_fptr(void)
78248 +{
78249 + basic_block bb;
78250 + gimple_stmt_iterator gsi;
78251 +
78252 + // 1. loop through BBs and GIMPLE statements
78253 + FOR_EACH_BB(bb) {
78254 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78255 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78256 + tree fn;
78257 + gimple call_stmt;
78258 +
78259 + // is it a call ...
78260 + call_stmt = gsi_stmt(gsi);
78261 + if (!is_gimple_call(call_stmt))
78262 + continue;
78263 + fn = gimple_call_fn(call_stmt);
78264 + if (TREE_CODE(fn) == ADDR_EXPR)
78265 + continue;
78266 + if (TREE_CODE(fn) != SSA_NAME)
78267 + gcc_unreachable();
78268 +
78269 + // ... through a function pointer
78270 + fn = SSA_NAME_VAR(fn);
78271 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78272 + continue;
78273 + fn = TREE_TYPE(fn);
78274 + if (TREE_CODE(fn) != POINTER_TYPE)
78275 + continue;
78276 + fn = TREE_TYPE(fn);
78277 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78278 + continue;
78279 +
78280 + kernexec_instrument_fptr(gsi);
78281 +
78282 +//debug_tree(gimple_call_fn(call_stmt));
78283 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78284 + }
78285 + }
78286 +
78287 + return 0;
78288 +}
78289 +
78290 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78291 +static void kernexec_instrument_retaddr_bts(rtx insn)
78292 +{
78293 + rtx btsq;
78294 + rtvec argvec, constraintvec, labelvec;
78295 + int line;
78296 +
78297 + // create asm volatile("btsq $63,(%%rsp)":::)
78298 + argvec = rtvec_alloc(0);
78299 + constraintvec = rtvec_alloc(0);
78300 + labelvec = rtvec_alloc(0);
78301 + line = expand_location(RTL_LOCATION(insn)).line;
78302 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78303 + MEM_VOLATILE_P(btsq) = 1;
78304 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78305 + emit_insn_before(btsq, insn);
78306 +}
78307 +
78308 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78309 +static void kernexec_instrument_retaddr_or(rtx insn)
78310 +{
78311 + rtx orq;
78312 + rtvec argvec, constraintvec, labelvec;
78313 + int line;
78314 +
78315 + // create asm volatile("orq %%r10,(%%rsp)":::)
78316 + argvec = rtvec_alloc(0);
78317 + constraintvec = rtvec_alloc(0);
78318 + labelvec = rtvec_alloc(0);
78319 + line = expand_location(RTL_LOCATION(insn)).line;
78320 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78321 + MEM_VOLATILE_P(orq) = 1;
78322 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78323 + emit_insn_before(orq, insn);
78324 +}
78325 +
78326 +/*
78327 + * find all asm level function returns and forcibly set the highest bit of the return address
78328 + */
78329 +static unsigned int execute_kernexec_retaddr(void)
78330 +{
78331 + rtx insn;
78332 +
78333 + // 1. find function returns
78334 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78335 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78336 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78337 + rtx body;
78338 +
78339 + // is it a retn
78340 + if (!JUMP_P(insn))
78341 + continue;
78342 + body = PATTERN(insn);
78343 + if (GET_CODE(body) == PARALLEL)
78344 + body = XVECEXP(body, 0, 0);
78345 + if (GET_CODE(body) != RETURN)
78346 + continue;
78347 + kernexec_instrument_retaddr(insn);
78348 + }
78349 +
78350 +// print_simple_rtl(stderr, get_insns());
78351 +// print_rtl(stderr, get_insns());
78352 +
78353 + return 0;
78354 +}
78355 +
78356 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78357 +{
78358 + const char * const plugin_name = plugin_info->base_name;
78359 + const int argc = plugin_info->argc;
78360 + const struct plugin_argument * const argv = plugin_info->argv;
78361 + int i;
78362 + struct register_pass_info kernexec_fptr_pass_info = {
78363 + .pass = &kernexec_fptr_pass.pass,
78364 + .reference_pass_name = "ssa",
78365 + .ref_pass_instance_number = 0,
78366 + .pos_op = PASS_POS_INSERT_AFTER
78367 + };
78368 + struct register_pass_info kernexec_retaddr_pass_info = {
78369 + .pass = &kernexec_retaddr_pass.pass,
78370 + .reference_pass_name = "pro_and_epilogue",
78371 + .ref_pass_instance_number = 0,
78372 + .pos_op = PASS_POS_INSERT_AFTER
78373 + };
78374 +
78375 + if (!plugin_default_version_check(version, &gcc_version)) {
78376 + error(G_("incompatible gcc/plugin versions"));
78377 + return 1;
78378 + }
78379 +
78380 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78381 +
78382 + if (TARGET_64BIT == 0)
78383 + return 0;
78384 +
78385 + for (i = 0; i < argc; ++i) {
78386 + if (!strcmp(argv[i].key, "method")) {
78387 + if (!argv[i].value) {
78388 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78389 + continue;
78390 + }
78391 + if (!strcmp(argv[i].value, "bts")) {
78392 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78393 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78394 + } else if (!strcmp(argv[i].value, "or")) {
78395 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78396 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78397 + fix_register("r10", 1, 1);
78398 + } else
78399 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78400 + continue;
78401 + }
78402 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78403 + }
78404 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78405 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78406 +
78407 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78408 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78409 +
78410 + return 0;
78411 +}
78412 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78413 new file mode 100644
78414 index 0000000..d44f37c
78415 --- /dev/null
78416 +++ b/tools/gcc/stackleak_plugin.c
78417 @@ -0,0 +1,291 @@
78418 +/*
78419 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78420 + * Licensed under the GPL v2
78421 + *
78422 + * Note: the choice of the license means that the compilation process is
78423 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78424 + * but for the kernel it doesn't matter since it doesn't link against
78425 + * any of the gcc libraries
78426 + *
78427 + * gcc plugin to help implement various PaX features
78428 + *
78429 + * - track lowest stack pointer
78430 + *
78431 + * TODO:
78432 + * - initialize all local variables
78433 + *
78434 + * BUGS:
78435 + * - none known
78436 + */
78437 +#include "gcc-plugin.h"
78438 +#include "config.h"
78439 +#include "system.h"
78440 +#include "coretypes.h"
78441 +#include "tree.h"
78442 +#include "tree-pass.h"
78443 +#include "flags.h"
78444 +#include "intl.h"
78445 +#include "toplev.h"
78446 +#include "plugin.h"
78447 +//#include "expr.h" where are you...
78448 +#include "diagnostic.h"
78449 +#include "plugin-version.h"
78450 +#include "tm.h"
78451 +#include "function.h"
78452 +#include "basic-block.h"
78453 +#include "gimple.h"
78454 +#include "rtl.h"
78455 +#include "emit-rtl.h"
78456 +
78457 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78458 +
78459 +int plugin_is_GPL_compatible;
78460 +
78461 +static int track_frame_size = -1;
78462 +static const char track_function[] = "pax_track_stack";
78463 +static const char check_function[] = "pax_check_alloca";
78464 +static bool init_locals;
78465 +
78466 +static struct plugin_info stackleak_plugin_info = {
78467 + .version = "201111150100",
78468 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78469 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78470 +};
78471 +
78472 +static bool gate_stackleak_track_stack(void);
78473 +static unsigned int execute_stackleak_tree_instrument(void);
78474 +static unsigned int execute_stackleak_final(void);
78475 +
78476 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78477 + .pass = {
78478 + .type = GIMPLE_PASS,
78479 + .name = "stackleak_tree_instrument",
78480 + .gate = gate_stackleak_track_stack,
78481 + .execute = execute_stackleak_tree_instrument,
78482 + .sub = NULL,
78483 + .next = NULL,
78484 + .static_pass_number = 0,
78485 + .tv_id = TV_NONE,
78486 + .properties_required = PROP_gimple_leh | PROP_cfg,
78487 + .properties_provided = 0,
78488 + .properties_destroyed = 0,
78489 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
78490 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
78491 + }
78492 +};
78493 +
78494 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
78495 + .pass = {
78496 + .type = RTL_PASS,
78497 + .name = "stackleak_final",
78498 + .gate = gate_stackleak_track_stack,
78499 + .execute = execute_stackleak_final,
78500 + .sub = NULL,
78501 + .next = NULL,
78502 + .static_pass_number = 0,
78503 + .tv_id = TV_NONE,
78504 + .properties_required = 0,
78505 + .properties_provided = 0,
78506 + .properties_destroyed = 0,
78507 + .todo_flags_start = 0,
78508 + .todo_flags_finish = TODO_dump_func
78509 + }
78510 +};
78511 +
78512 +static bool gate_stackleak_track_stack(void)
78513 +{
78514 + return track_frame_size >= 0;
78515 +}
78516 +
78517 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
78518 +{
78519 + gimple check_alloca;
78520 + tree fndecl, fntype, alloca_size;
78521 +
78522 + // insert call to void pax_check_alloca(unsigned long size)
78523 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
78524 + fndecl = build_fn_decl(check_function, fntype);
78525 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78526 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
78527 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
78528 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
78529 +}
78530 +
78531 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
78532 +{
78533 + gimple track_stack;
78534 + tree fndecl, fntype;
78535 +
78536 + // insert call to void pax_track_stack(void)
78537 + fntype = build_function_type_list(void_type_node, NULL_TREE);
78538 + fndecl = build_fn_decl(track_function, fntype);
78539 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78540 + track_stack = gimple_build_call(fndecl, 0);
78541 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
78542 +}
78543 +
78544 +#if BUILDING_GCC_VERSION == 4005
78545 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
78546 +{
78547 + tree fndecl;
78548 +
78549 + if (!is_gimple_call(stmt))
78550 + return false;
78551 + fndecl = gimple_call_fndecl(stmt);
78552 + if (!fndecl)
78553 + return false;
78554 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
78555 + return false;
78556 +// print_node(stderr, "pax", fndecl, 4);
78557 + return DECL_FUNCTION_CODE(fndecl) == code;
78558 +}
78559 +#endif
78560 +
78561 +static bool is_alloca(gimple stmt)
78562 +{
78563 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
78564 + return true;
78565 +
78566 +#if BUILDING_GCC_VERSION >= 4007
78567 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
78568 + return true;
78569 +#endif
78570 +
78571 + return false;
78572 +}
78573 +
78574 +static unsigned int execute_stackleak_tree_instrument(void)
78575 +{
78576 + basic_block bb, entry_bb;
78577 + bool prologue_instrumented = false;
78578 +
78579 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
78580 +
78581 + // 1. loop through BBs and GIMPLE statements
78582 + FOR_EACH_BB(bb) {
78583 + gimple_stmt_iterator gsi;
78584 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78585 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
78586 + if (!is_alloca(gsi_stmt(gsi)))
78587 + continue;
78588 +
78589 + // 2. insert stack overflow check before each __builtin_alloca call
78590 + stackleak_check_alloca(gsi);
78591 +
78592 + // 3. insert track call after each __builtin_alloca call
78593 + stackleak_add_instrumentation(gsi);
78594 + if (bb == entry_bb)
78595 + prologue_instrumented = true;
78596 + }
78597 + }
78598 +
78599 + // 4. insert track call at the beginning
78600 + if (!prologue_instrumented) {
78601 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
78602 + if (dom_info_available_p(CDI_DOMINATORS))
78603 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
78604 + stackleak_add_instrumentation(gsi_start_bb(bb));
78605 + }
78606 +
78607 + return 0;
78608 +}
78609 +
78610 +static unsigned int execute_stackleak_final(void)
78611 +{
78612 + rtx insn;
78613 +
78614 + if (cfun->calls_alloca)
78615 + return 0;
78616 +
78617 + // keep calls only if function frame is big enough
78618 + if (get_frame_size() >= track_frame_size)
78619 + return 0;
78620 +
78621 + // 1. find pax_track_stack calls
78622 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78623 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
78624 + rtx body;
78625 +
78626 + if (!CALL_P(insn))
78627 + continue;
78628 + body = PATTERN(insn);
78629 + if (GET_CODE(body) != CALL)
78630 + continue;
78631 + body = XEXP(body, 0);
78632 + if (GET_CODE(body) != MEM)
78633 + continue;
78634 + body = XEXP(body, 0);
78635 + if (GET_CODE(body) != SYMBOL_REF)
78636 + continue;
78637 + if (strcmp(XSTR(body, 0), track_function))
78638 + continue;
78639 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78640 + // 2. delete call
78641 + insn = delete_insn_and_edges(insn);
78642 +#if BUILDING_GCC_VERSION >= 4007
78643 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
78644 + insn = delete_insn_and_edges(insn);
78645 +#endif
78646 + }
78647 +
78648 +// print_simple_rtl(stderr, get_insns());
78649 +// print_rtl(stderr, get_insns());
78650 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78651 +
78652 + return 0;
78653 +}
78654 +
78655 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78656 +{
78657 + const char * const plugin_name = plugin_info->base_name;
78658 + const int argc = plugin_info->argc;
78659 + const struct plugin_argument * const argv = plugin_info->argv;
78660 + int i;
78661 + struct register_pass_info stackleak_tree_instrument_pass_info = {
78662 + .pass = &stackleak_tree_instrument_pass.pass,
78663 +// .reference_pass_name = "tree_profile",
78664 + .reference_pass_name = "optimized",
78665 + .ref_pass_instance_number = 0,
78666 + .pos_op = PASS_POS_INSERT_AFTER
78667 + };
78668 + struct register_pass_info stackleak_final_pass_info = {
78669 + .pass = &stackleak_final_rtl_opt_pass.pass,
78670 + .reference_pass_name = "final",
78671 + .ref_pass_instance_number = 0,
78672 + .pos_op = PASS_POS_INSERT_BEFORE
78673 + };
78674 +
78675 + if (!plugin_default_version_check(version, &gcc_version)) {
78676 + error(G_("incompatible gcc/plugin versions"));
78677 + return 1;
78678 + }
78679 +
78680 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
78681 +
78682 + for (i = 0; i < argc; ++i) {
78683 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
78684 + if (!argv[i].value) {
78685 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78686 + continue;
78687 + }
78688 + track_frame_size = atoi(argv[i].value);
78689 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
78690 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78691 + continue;
78692 + }
78693 + if (!strcmp(argv[i].key, "initialize-locals")) {
78694 + if (argv[i].value) {
78695 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78696 + continue;
78697 + }
78698 + init_locals = true;
78699 + continue;
78700 + }
78701 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78702 + }
78703 +
78704 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
78705 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
78706 +
78707 + return 0;
78708 +}
78709 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
78710 index 6789d78..4afd019 100644
78711 --- a/tools/perf/util/include/asm/alternative-asm.h
78712 +++ b/tools/perf/util/include/asm/alternative-asm.h
78713 @@ -5,4 +5,7 @@
78714
78715 #define altinstruction_entry #
78716
78717 + .macro pax_force_retaddr rip=0, reload=0
78718 + .endm
78719 +
78720 #endif
78721 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
78722 index af0f22f..9a7d479 100644
78723 --- a/usr/gen_init_cpio.c
78724 +++ b/usr/gen_init_cpio.c
78725 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
78726 int retval;
78727 int rc = -1;
78728 int namesize;
78729 - int i;
78730 + unsigned int i;
78731
78732 mode |= S_IFREG;
78733
78734 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
78735 *env_var = *expanded = '\0';
78736 strncat(env_var, start + 2, end - start - 2);
78737 strncat(expanded, new_location, start - new_location);
78738 - strncat(expanded, getenv(env_var), PATH_MAX);
78739 - strncat(expanded, end + 1, PATH_MAX);
78740 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
78741 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
78742 strncpy(new_location, expanded, PATH_MAX);
78743 + new_location[PATH_MAX] = 0;
78744 } else
78745 break;
78746 }
78747 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
78748 index d9cfb78..4f27c10 100644
78749 --- a/virt/kvm/kvm_main.c
78750 +++ b/virt/kvm/kvm_main.c
78751 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
78752
78753 static cpumask_var_t cpus_hardware_enabled;
78754 static int kvm_usage_count = 0;
78755 -static atomic_t hardware_enable_failed;
78756 +static atomic_unchecked_t hardware_enable_failed;
78757
78758 struct kmem_cache *kvm_vcpu_cache;
78759 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
78760 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
78761
78762 if (r) {
78763 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
78764 - atomic_inc(&hardware_enable_failed);
78765 + atomic_inc_unchecked(&hardware_enable_failed);
78766 printk(KERN_INFO "kvm: enabling virtualization on "
78767 "CPU%d failed\n", cpu);
78768 }
78769 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
78770
78771 kvm_usage_count++;
78772 if (kvm_usage_count == 1) {
78773 - atomic_set(&hardware_enable_failed, 0);
78774 + atomic_set_unchecked(&hardware_enable_failed, 0);
78775 on_each_cpu(hardware_enable_nolock, NULL, 1);
78776
78777 - if (atomic_read(&hardware_enable_failed)) {
78778 + if (atomic_read_unchecked(&hardware_enable_failed)) {
78779 hardware_disable_all_nolock();
78780 r = -EBUSY;
78781 }
78782 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
78783 kvm_arch_vcpu_put(vcpu);
78784 }
78785
78786 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78787 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78788 struct module *module)
78789 {
78790 int r;
78791 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78792 if (!vcpu_align)
78793 vcpu_align = __alignof__(struct kvm_vcpu);
78794 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
78795 - 0, NULL);
78796 + SLAB_USERCOPY, NULL);
78797 if (!kvm_vcpu_cache) {
78798 r = -ENOMEM;
78799 goto out_free_3;
78800 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78801 if (r)
78802 goto out_free;
78803
78804 - kvm_chardev_ops.owner = module;
78805 - kvm_vm_fops.owner = module;
78806 - kvm_vcpu_fops.owner = module;
78807 + pax_open_kernel();
78808 + *(void **)&kvm_chardev_ops.owner = module;
78809 + *(void **)&kvm_vm_fops.owner = module;
78810 + *(void **)&kvm_vcpu_fops.owner = module;
78811 + pax_close_kernel();
78812
78813 r = misc_register(&kvm_dev);
78814 if (r) {