]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.7-201202252120.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.7-201202252120.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index d1bdc90..6b9874c 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +ifeq ($(KBUILD_EXTMOD),)
243 +gcc-plugins:
244 + $(Q)$(MAKE) $(build)=tools/gcc
245 +else
246 +gcc-plugins: ;
247 +endif
248 +else
249 +gcc-plugins:
250 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252 +else
253 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254 +endif
255 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256 +endif
257 +endif
258 +
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262 @@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271 @@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279 @@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283 -$(vmlinux-dirs): prepare scripts
284 +$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288 @@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296 @@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304 @@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308 -modules_prepare: prepare scripts
309 +modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313 @@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317 + -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321 @@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329 @@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333 -%.s: %.c prepare scripts FORCE
334 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335 +%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339 -%.o: %.c prepare scripts FORCE
340 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341 +%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.s: %.S prepare scripts FORCE
346 +%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 -%.o: %.S prepare scripts FORCE
349 +%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353 @@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357 -%/: prepare scripts FORCE
358 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359 +%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363 -%.ko: prepare scripts FORCE
364 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365 +%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
370 index 640f909..48b6597 100644
371 --- a/arch/alpha/include/asm/atomic.h
372 +++ b/arch/alpha/include/asm/atomic.h
373 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
374 #define atomic_dec(v) atomic_sub(1,(v))
375 #define atomic64_dec(v) atomic64_sub(1,(v))
376
377 +#define atomic64_read_unchecked(v) atomic64_read(v)
378 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
379 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
380 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
381 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
382 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
383 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
384 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
385 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
386 +
387 #define smp_mb__before_atomic_dec() smp_mb()
388 #define smp_mb__after_atomic_dec() smp_mb()
389 #define smp_mb__before_atomic_inc() smp_mb()
390 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
391 index da5449e..7418343 100644
392 --- a/arch/alpha/include/asm/elf.h
393 +++ b/arch/alpha/include/asm/elf.h
394 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
395
396 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
397
398 +#ifdef CONFIG_PAX_ASLR
399 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
400 +
401 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
402 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
403 +#endif
404 +
405 /* $0 is set by ld.so to a pointer to a function which might be
406 registered using atexit. This provides a mean for the dynamic
407 linker to call DT_FINI functions for shared libraries that have
408 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
409 index de98a73..bd4f1f8 100644
410 --- a/arch/alpha/include/asm/pgtable.h
411 +++ b/arch/alpha/include/asm/pgtable.h
412 @@ -101,6 +101,17 @@ struct vm_area_struct;
413 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
414 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
415 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
416 +
417 +#ifdef CONFIG_PAX_PAGEEXEC
418 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
419 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
420 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
421 +#else
422 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
423 +# define PAGE_COPY_NOEXEC PAGE_COPY
424 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
425 +#endif
426 +
427 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
428
429 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
430 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
431 index 2fd00b7..cfd5069 100644
432 --- a/arch/alpha/kernel/module.c
433 +++ b/arch/alpha/kernel/module.c
434 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
435
436 /* The small sections were sorted to the end of the segment.
437 The following should definitely cover them. */
438 - gp = (u64)me->module_core + me->core_size - 0x8000;
439 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
440 got = sechdrs[me->arch.gotsecindex].sh_addr;
441
442 for (i = 0; i < n; i++) {
443 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
444 index 01e8715..be0e80f 100644
445 --- a/arch/alpha/kernel/osf_sys.c
446 +++ b/arch/alpha/kernel/osf_sys.c
447 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
448 /* At this point: (!vma || addr < vma->vm_end). */
449 if (limit - len < addr)
450 return -ENOMEM;
451 - if (!vma || addr + len <= vma->vm_start)
452 + if (check_heap_stack_gap(vma, addr, len))
453 return addr;
454 addr = vma->vm_end;
455 vma = vma->vm_next;
456 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
457 merely specific addresses, but regions of memory -- perhaps
458 this feature should be incorporated into all ports? */
459
460 +#ifdef CONFIG_PAX_RANDMMAP
461 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
462 +#endif
463 +
464 if (addr) {
465 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
466 if (addr != (unsigned long) -ENOMEM)
467 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
468 }
469
470 /* Next, try allocating at TASK_UNMAPPED_BASE. */
471 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
472 - len, limit);
473 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
474 +
475 if (addr != (unsigned long) -ENOMEM)
476 return addr;
477
478 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
479 index fadd5f8..904e73a 100644
480 --- a/arch/alpha/mm/fault.c
481 +++ b/arch/alpha/mm/fault.c
482 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
483 __reload_thread(pcb);
484 }
485
486 +#ifdef CONFIG_PAX_PAGEEXEC
487 +/*
488 + * PaX: decide what to do with offenders (regs->pc = fault address)
489 + *
490 + * returns 1 when task should be killed
491 + * 2 when patched PLT trampoline was detected
492 + * 3 when unpatched PLT trampoline was detected
493 + */
494 +static int pax_handle_fetch_fault(struct pt_regs *regs)
495 +{
496 +
497 +#ifdef CONFIG_PAX_EMUPLT
498 + int err;
499 +
500 + do { /* PaX: patched PLT emulation #1 */
501 + unsigned int ldah, ldq, jmp;
502 +
503 + err = get_user(ldah, (unsigned int *)regs->pc);
504 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
505 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
506 +
507 + if (err)
508 + break;
509 +
510 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
511 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
512 + jmp == 0x6BFB0000U)
513 + {
514 + unsigned long r27, addr;
515 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
516 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
517 +
518 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
519 + err = get_user(r27, (unsigned long *)addr);
520 + if (err)
521 + break;
522 +
523 + regs->r27 = r27;
524 + regs->pc = r27;
525 + return 2;
526 + }
527 + } while (0);
528 +
529 + do { /* PaX: patched PLT emulation #2 */
530 + unsigned int ldah, lda, br;
531 +
532 + err = get_user(ldah, (unsigned int *)regs->pc);
533 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
534 + err |= get_user(br, (unsigned int *)(regs->pc+8));
535 +
536 + if (err)
537 + break;
538 +
539 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
540 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
541 + (br & 0xFFE00000U) == 0xC3E00000U)
542 + {
543 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
544 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
545 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
546 +
547 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
548 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
549 + return 2;
550 + }
551 + } while (0);
552 +
553 + do { /* PaX: unpatched PLT emulation */
554 + unsigned int br;
555 +
556 + err = get_user(br, (unsigned int *)regs->pc);
557 +
558 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
559 + unsigned int br2, ldq, nop, jmp;
560 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
561 +
562 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563 + err = get_user(br2, (unsigned int *)addr);
564 + err |= get_user(ldq, (unsigned int *)(addr+4));
565 + err |= get_user(nop, (unsigned int *)(addr+8));
566 + err |= get_user(jmp, (unsigned int *)(addr+12));
567 + err |= get_user(resolver, (unsigned long *)(addr+16));
568 +
569 + if (err)
570 + break;
571 +
572 + if (br2 == 0xC3600000U &&
573 + ldq == 0xA77B000CU &&
574 + nop == 0x47FF041FU &&
575 + jmp == 0x6B7B0000U)
576 + {
577 + regs->r28 = regs->pc+4;
578 + regs->r27 = addr+16;
579 + regs->pc = resolver;
580 + return 3;
581 + }
582 + }
583 + } while (0);
584 +#endif
585 +
586 + return 1;
587 +}
588 +
589 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
590 +{
591 + unsigned long i;
592 +
593 + printk(KERN_ERR "PAX: bytes at PC: ");
594 + for (i = 0; i < 5; i++) {
595 + unsigned int c;
596 + if (get_user(c, (unsigned int *)pc+i))
597 + printk(KERN_CONT "???????? ");
598 + else
599 + printk(KERN_CONT "%08x ", c);
600 + }
601 + printk("\n");
602 +}
603 +#endif
604
605 /*
606 * This routine handles page faults. It determines the address,
607 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
608 good_area:
609 si_code = SEGV_ACCERR;
610 if (cause < 0) {
611 - if (!(vma->vm_flags & VM_EXEC))
612 + if (!(vma->vm_flags & VM_EXEC)) {
613 +
614 +#ifdef CONFIG_PAX_PAGEEXEC
615 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
616 + goto bad_area;
617 +
618 + up_read(&mm->mmap_sem);
619 + switch (pax_handle_fetch_fault(regs)) {
620 +
621 +#ifdef CONFIG_PAX_EMUPLT
622 + case 2:
623 + case 3:
624 + return;
625 +#endif
626 +
627 + }
628 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
629 + do_group_exit(SIGKILL);
630 +#else
631 goto bad_area;
632 +#endif
633 +
634 + }
635 } else if (!cause) {
636 /* Allow reads even for write-only mappings */
637 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
638 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
639 index 86976d0..6610950 100644
640 --- a/arch/arm/include/asm/atomic.h
641 +++ b/arch/arm/include/asm/atomic.h
642 @@ -15,6 +15,10 @@
643 #include <linux/types.h>
644 #include <asm/system.h>
645
646 +#ifdef CONFIG_GENERIC_ATOMIC64
647 +#include <asm-generic/atomic64.h>
648 +#endif
649 +
650 #define ATOMIC_INIT(i) { (i) }
651
652 #ifdef __KERNEL__
653 @@ -239,6 +243,14 @@ typedef struct {
654 u64 __aligned(8) counter;
655 } atomic64_t;
656
657 +#ifdef CONFIG_PAX_REFCOUNT
658 +typedef struct {
659 + u64 __aligned(8) counter;
660 +} atomic64_unchecked_t;
661 +#else
662 +typedef atomic64_t atomic64_unchecked_t;
663 +#endif
664 +
665 #define ATOMIC64_INIT(i) { (i) }
666
667 static inline u64 atomic64_read(atomic64_t *v)
668 @@ -459,6 +471,16 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
669 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
670 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
671
672 +#define atomic64_read_unchecked(v) atomic64_read(v)
673 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
674 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
675 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
676 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
677 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
678 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
679 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
680 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
681 +
682 #endif /* !CONFIG_GENERIC_ATOMIC64 */
683 #endif
684 #endif
685 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
686 index 0e9ce8d..6ef1e03 100644
687 --- a/arch/arm/include/asm/elf.h
688 +++ b/arch/arm/include/asm/elf.h
689 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
690 the loader. We need to make sure that it is out of the way of the program
691 that it will "exec", and that there is sufficient room for the brk. */
692
693 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
694 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
695 +
696 +#ifdef CONFIG_PAX_ASLR
697 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
698 +
699 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
700 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
701 +#endif
702
703 /* When the program starts, a1 contains a pointer to a function to be
704 registered with atexit, as per the SVR4 ABI. A value of 0 means we
705 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
706 extern void elf_set_personality(const struct elf32_hdr *);
707 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
708
709 -struct mm_struct;
710 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
711 -#define arch_randomize_brk arch_randomize_brk
712 -
713 extern int vectors_user_mapping(void);
714 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
715 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
716 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
717 index e51b1e8..32a3113 100644
718 --- a/arch/arm/include/asm/kmap_types.h
719 +++ b/arch/arm/include/asm/kmap_types.h
720 @@ -21,6 +21,7 @@ enum km_type {
721 KM_L1_CACHE,
722 KM_L2_CACHE,
723 KM_KDB,
724 + KM_CLEARPAGE,
725 KM_TYPE_NR
726 };
727
728 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
729 index b293616..96310e5 100644
730 --- a/arch/arm/include/asm/uaccess.h
731 +++ b/arch/arm/include/asm/uaccess.h
732 @@ -22,6 +22,8 @@
733 #define VERIFY_READ 0
734 #define VERIFY_WRITE 1
735
736 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
737 +
738 /*
739 * The exception table consists of pairs of addresses: the first is the
740 * address of an instruction that is allowed to fault, and the second is
741 @@ -387,8 +389,23 @@ do { \
742
743
744 #ifdef CONFIG_MMU
745 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
746 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
747 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
748 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
749 +
750 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
751 +{
752 + if (!__builtin_constant_p(n))
753 + check_object_size(to, n, false);
754 + return ___copy_from_user(to, from, n);
755 +}
756 +
757 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
758 +{
759 + if (!__builtin_constant_p(n))
760 + check_object_size(from, n, true);
761 + return ___copy_to_user(to, from, n);
762 +}
763 +
764 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
765 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
766 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
767 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
768
769 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
770 {
771 + if ((long)n < 0)
772 + return n;
773 +
774 if (access_ok(VERIFY_READ, from, n))
775 n = __copy_from_user(to, from, n);
776 else /* security hole - plug it */
777 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
778
779 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
780 {
781 + if ((long)n < 0)
782 + return n;
783 +
784 if (access_ok(VERIFY_WRITE, to, n))
785 n = __copy_to_user(to, from, n);
786 return n;
787 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
788 index 5b0bce6..becd81c 100644
789 --- a/arch/arm/kernel/armksyms.c
790 +++ b/arch/arm/kernel/armksyms.c
791 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
792 #ifdef CONFIG_MMU
793 EXPORT_SYMBOL(copy_page);
794
795 -EXPORT_SYMBOL(__copy_from_user);
796 -EXPORT_SYMBOL(__copy_to_user);
797 +EXPORT_SYMBOL(___copy_from_user);
798 +EXPORT_SYMBOL(___copy_to_user);
799 EXPORT_SYMBOL(__clear_user);
800
801 EXPORT_SYMBOL(__get_user_1);
802 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
803 index 3d0c6fb..3dcae52 100644
804 --- a/arch/arm/kernel/process.c
805 +++ b/arch/arm/kernel/process.c
806 @@ -28,7 +28,6 @@
807 #include <linux/tick.h>
808 #include <linux/utsname.h>
809 #include <linux/uaccess.h>
810 -#include <linux/random.h>
811 #include <linux/hw_breakpoint.h>
812 #include <linux/cpuidle.h>
813
814 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
815 return 0;
816 }
817
818 -unsigned long arch_randomize_brk(struct mm_struct *mm)
819 -{
820 - unsigned long range_end = mm->brk + 0x02000000;
821 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
822 -}
823 -
824 #ifdef CONFIG_MMU
825 /*
826 * The vectors page is always readable from user space for the
827 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
828 index 99a5727..a3d5bb1 100644
829 --- a/arch/arm/kernel/traps.c
830 +++ b/arch/arm/kernel/traps.c
831 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
832
833 static DEFINE_RAW_SPINLOCK(die_lock);
834
835 +extern void gr_handle_kernel_exploit(void);
836 +
837 /*
838 * This function is protected against re-entrancy.
839 */
840 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
841 panic("Fatal exception in interrupt");
842 if (panic_on_oops)
843 panic("Fatal exception");
844 +
845 + gr_handle_kernel_exploit();
846 +
847 if (ret != NOTIFY_STOP)
848 do_exit(SIGSEGV);
849 }
850 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
851 index 66a477a..bee61d3 100644
852 --- a/arch/arm/lib/copy_from_user.S
853 +++ b/arch/arm/lib/copy_from_user.S
854 @@ -16,7 +16,7 @@
855 /*
856 * Prototype:
857 *
858 - * size_t __copy_from_user(void *to, const void *from, size_t n)
859 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
860 *
861 * Purpose:
862 *
863 @@ -84,11 +84,11 @@
864
865 .text
866
867 -ENTRY(__copy_from_user)
868 +ENTRY(___copy_from_user)
869
870 #include "copy_template.S"
871
872 -ENDPROC(__copy_from_user)
873 +ENDPROC(___copy_from_user)
874
875 .pushsection .fixup,"ax"
876 .align 0
877 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
878 index d066df6..df28194 100644
879 --- a/arch/arm/lib/copy_to_user.S
880 +++ b/arch/arm/lib/copy_to_user.S
881 @@ -16,7 +16,7 @@
882 /*
883 * Prototype:
884 *
885 - * size_t __copy_to_user(void *to, const void *from, size_t n)
886 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
887 *
888 * Purpose:
889 *
890 @@ -88,11 +88,11 @@
891 .text
892
893 ENTRY(__copy_to_user_std)
894 -WEAK(__copy_to_user)
895 +WEAK(___copy_to_user)
896
897 #include "copy_template.S"
898
899 -ENDPROC(__copy_to_user)
900 +ENDPROC(___copy_to_user)
901 ENDPROC(__copy_to_user_std)
902
903 .pushsection .fixup,"ax"
904 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
905 index d0ece2a..5ae2f39 100644
906 --- a/arch/arm/lib/uaccess.S
907 +++ b/arch/arm/lib/uaccess.S
908 @@ -20,7 +20,7 @@
909
910 #define PAGE_SHIFT 12
911
912 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
913 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
914 * Purpose : copy a block to user memory from kernel memory
915 * Params : to - user memory
916 * : from - kernel memory
917 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
918 sub r2, r2, ip
919 b .Lc2u_dest_aligned
920
921 -ENTRY(__copy_to_user)
922 +ENTRY(___copy_to_user)
923 stmfd sp!, {r2, r4 - r7, lr}
924 cmp r2, #4
925 blt .Lc2u_not_enough
926 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
927 ldrgtb r3, [r1], #0
928 USER( T(strgtb) r3, [r0], #1) @ May fault
929 b .Lc2u_finished
930 -ENDPROC(__copy_to_user)
931 +ENDPROC(___copy_to_user)
932
933 .pushsection .fixup,"ax"
934 .align 0
935 9001: ldmfd sp!, {r0, r4 - r7, pc}
936 .popsection
937
938 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
939 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
940 * Purpose : copy a block from user memory to kernel memory
941 * Params : to - kernel memory
942 * : from - user memory
943 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
944 sub r2, r2, ip
945 b .Lcfu_dest_aligned
946
947 -ENTRY(__copy_from_user)
948 +ENTRY(___copy_from_user)
949 stmfd sp!, {r0, r2, r4 - r7, lr}
950 cmp r2, #4
951 blt .Lcfu_not_enough
952 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
953 USER( T(ldrgtb) r3, [r1], #1) @ May fault
954 strgtb r3, [r0], #1
955 b .Lcfu_finished
956 -ENDPROC(__copy_from_user)
957 +ENDPROC(___copy_from_user)
958
959 .pushsection .fixup,"ax"
960 .align 0
961 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
962 index 025f742..8432b08 100644
963 --- a/arch/arm/lib/uaccess_with_memcpy.c
964 +++ b/arch/arm/lib/uaccess_with_memcpy.c
965 @@ -104,7 +104,7 @@ out:
966 }
967
968 unsigned long
969 -__copy_to_user(void __user *to, const void *from, unsigned long n)
970 +___copy_to_user(void __user *to, const void *from, unsigned long n)
971 {
972 /*
973 * This test is stubbed out of the main function above to keep
974 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
975 index 2b2d51c..0127490 100644
976 --- a/arch/arm/mach-ux500/mbox-db5500.c
977 +++ b/arch/arm/mach-ux500/mbox-db5500.c
978 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
979 return sprintf(buf, "0x%X\n", mbox_value);
980 }
981
982 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
983 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
984
985 static int mbox_show(struct seq_file *s, void *data)
986 {
987 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
988 index aa33949..b242a2f 100644
989 --- a/arch/arm/mm/fault.c
990 +++ b/arch/arm/mm/fault.c
991 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
992 }
993 #endif
994
995 +#ifdef CONFIG_PAX_PAGEEXEC
996 + if (fsr & FSR_LNX_PF) {
997 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
998 + do_group_exit(SIGKILL);
999 + }
1000 +#endif
1001 +
1002 tsk->thread.address = addr;
1003 tsk->thread.error_code = fsr;
1004 tsk->thread.trap_no = 14;
1005 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1006 }
1007 #endif /* CONFIG_MMU */
1008
1009 +#ifdef CONFIG_PAX_PAGEEXEC
1010 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1011 +{
1012 + long i;
1013 +
1014 + printk(KERN_ERR "PAX: bytes at PC: ");
1015 + for (i = 0; i < 20; i++) {
1016 + unsigned char c;
1017 + if (get_user(c, (__force unsigned char __user *)pc+i))
1018 + printk(KERN_CONT "?? ");
1019 + else
1020 + printk(KERN_CONT "%02x ", c);
1021 + }
1022 + printk("\n");
1023 +
1024 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1025 + for (i = -1; i < 20; i++) {
1026 + unsigned long c;
1027 + if (get_user(c, (__force unsigned long __user *)sp+i))
1028 + printk(KERN_CONT "???????? ");
1029 + else
1030 + printk(KERN_CONT "%08lx ", c);
1031 + }
1032 + printk("\n");
1033 +}
1034 +#endif
1035 +
1036 /*
1037 * First Level Translation Fault Handler
1038 *
1039 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1040 index 44b628e..623ee2a 100644
1041 --- a/arch/arm/mm/mmap.c
1042 +++ b/arch/arm/mm/mmap.c
1043 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1044 if (len > TASK_SIZE)
1045 return -ENOMEM;
1046
1047 +#ifdef CONFIG_PAX_RANDMMAP
1048 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1049 +#endif
1050 +
1051 if (addr) {
1052 if (do_align)
1053 addr = COLOUR_ALIGN(addr, pgoff);
1054 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1055 addr = PAGE_ALIGN(addr);
1056
1057 vma = find_vma(mm, addr);
1058 - if (TASK_SIZE - len >= addr &&
1059 - (!vma || addr + len <= vma->vm_start))
1060 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1061 return addr;
1062 }
1063 if (len > mm->cached_hole_size) {
1064 - start_addr = addr = mm->free_area_cache;
1065 + start_addr = addr = mm->free_area_cache;
1066 } else {
1067 - start_addr = addr = TASK_UNMAPPED_BASE;
1068 - mm->cached_hole_size = 0;
1069 + start_addr = addr = mm->mmap_base;
1070 + mm->cached_hole_size = 0;
1071 }
1072 /* 8 bits of randomness in 20 address space bits */
1073 if ((current->flags & PF_RANDOMIZE) &&
1074 @@ -89,14 +92,14 @@ full_search:
1075 * Start a new search - just in case we missed
1076 * some holes.
1077 */
1078 - if (start_addr != TASK_UNMAPPED_BASE) {
1079 - start_addr = addr = TASK_UNMAPPED_BASE;
1080 + if (start_addr != mm->mmap_base) {
1081 + start_addr = addr = mm->mmap_base;
1082 mm->cached_hole_size = 0;
1083 goto full_search;
1084 }
1085 return -ENOMEM;
1086 }
1087 - if (!vma || addr + len <= vma->vm_start) {
1088 + if (check_heap_stack_gap(vma, addr, len)) {
1089 /*
1090 * Remember the place where we stopped the search:
1091 */
1092 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1093 index 3b3159b..425ea94 100644
1094 --- a/arch/avr32/include/asm/elf.h
1095 +++ b/arch/avr32/include/asm/elf.h
1096 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1097 the loader. We need to make sure that it is out of the way of the program
1098 that it will "exec", and that there is sufficient room for the brk. */
1099
1100 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1101 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1102
1103 +#ifdef CONFIG_PAX_ASLR
1104 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1105 +
1106 +#define PAX_DELTA_MMAP_LEN 15
1107 +#define PAX_DELTA_STACK_LEN 15
1108 +#endif
1109
1110 /* This yields a mask that user programs can use to figure out what
1111 instruction set this CPU supports. This could be done in user space,
1112 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1113 index b7f5c68..556135c 100644
1114 --- a/arch/avr32/include/asm/kmap_types.h
1115 +++ b/arch/avr32/include/asm/kmap_types.h
1116 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1117 D(11) KM_IRQ1,
1118 D(12) KM_SOFTIRQ0,
1119 D(13) KM_SOFTIRQ1,
1120 -D(14) KM_TYPE_NR
1121 +D(14) KM_CLEARPAGE,
1122 +D(15) KM_TYPE_NR
1123 };
1124
1125 #undef D
1126 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1127 index f7040a1..db9f300 100644
1128 --- a/arch/avr32/mm/fault.c
1129 +++ b/arch/avr32/mm/fault.c
1130 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1131
1132 int exception_trace = 1;
1133
1134 +#ifdef CONFIG_PAX_PAGEEXEC
1135 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1136 +{
1137 + unsigned long i;
1138 +
1139 + printk(KERN_ERR "PAX: bytes at PC: ");
1140 + for (i = 0; i < 20; i++) {
1141 + unsigned char c;
1142 + if (get_user(c, (unsigned char *)pc+i))
1143 + printk(KERN_CONT "?? ");
1144 + else
1145 + printk(KERN_CONT "%02x ", c);
1146 + }
1147 + printk("\n");
1148 +}
1149 +#endif
1150 +
1151 /*
1152 * This routine handles page faults. It determines the address and the
1153 * problem, and then passes it off to one of the appropriate routines.
1154 @@ -156,6 +173,16 @@ bad_area:
1155 up_read(&mm->mmap_sem);
1156
1157 if (user_mode(regs)) {
1158 +
1159 +#ifdef CONFIG_PAX_PAGEEXEC
1160 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1161 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1162 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1163 + do_group_exit(SIGKILL);
1164 + }
1165 + }
1166 +#endif
1167 +
1168 if (exception_trace && printk_ratelimit())
1169 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1170 "sp %08lx ecr %lu\n",
1171 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1172 index 0d8a7d6..d0c9ff5 100644
1173 --- a/arch/frv/include/asm/atomic.h
1174 +++ b/arch/frv/include/asm/atomic.h
1175 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1176 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1177 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1178
1179 +#define atomic64_read_unchecked(v) atomic64_read(v)
1180 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1181 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1182 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1183 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1184 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1185 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1186 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1187 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1188 +
1189 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
1190 {
1191 int c, old;
1192 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1193 index f8e16b2..c73ff79 100644
1194 --- a/arch/frv/include/asm/kmap_types.h
1195 +++ b/arch/frv/include/asm/kmap_types.h
1196 @@ -23,6 +23,7 @@ enum km_type {
1197 KM_IRQ1,
1198 KM_SOFTIRQ0,
1199 KM_SOFTIRQ1,
1200 + KM_CLEARPAGE,
1201 KM_TYPE_NR
1202 };
1203
1204 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1205 index 385fd30..6c3d97e 100644
1206 --- a/arch/frv/mm/elf-fdpic.c
1207 +++ b/arch/frv/mm/elf-fdpic.c
1208 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1209 if (addr) {
1210 addr = PAGE_ALIGN(addr);
1211 vma = find_vma(current->mm, addr);
1212 - if (TASK_SIZE - len >= addr &&
1213 - (!vma || addr + len <= vma->vm_start))
1214 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1215 goto success;
1216 }
1217
1218 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1219 for (; vma; vma = vma->vm_next) {
1220 if (addr > limit)
1221 break;
1222 - if (addr + len <= vma->vm_start)
1223 + if (check_heap_stack_gap(vma, addr, len))
1224 goto success;
1225 addr = vma->vm_end;
1226 }
1227 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1228 for (; vma; vma = vma->vm_next) {
1229 if (addr > limit)
1230 break;
1231 - if (addr + len <= vma->vm_start)
1232 + if (check_heap_stack_gap(vma, addr, len))
1233 goto success;
1234 addr = vma->vm_end;
1235 }
1236 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1237 index 3fad89e..3047da5 100644
1238 --- a/arch/ia64/include/asm/atomic.h
1239 +++ b/arch/ia64/include/asm/atomic.h
1240 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1241 #define atomic64_inc(v) atomic64_add(1, (v))
1242 #define atomic64_dec(v) atomic64_sub(1, (v))
1243
1244 +#define atomic64_read_unchecked(v) atomic64_read(v)
1245 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1246 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1247 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1248 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1249 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1250 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1251 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1252 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1253 +
1254 /* Atomic operations are already serializing */
1255 #define smp_mb__before_atomic_dec() barrier()
1256 #define smp_mb__after_atomic_dec() barrier()
1257 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1258 index b5298eb..67c6e62 100644
1259 --- a/arch/ia64/include/asm/elf.h
1260 +++ b/arch/ia64/include/asm/elf.h
1261 @@ -42,6 +42,13 @@
1262 */
1263 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1264
1265 +#ifdef CONFIG_PAX_ASLR
1266 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1267 +
1268 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1269 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1270 +#endif
1271 +
1272 #define PT_IA_64_UNWIND 0x70000001
1273
1274 /* IA-64 relocations: */
1275 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1276 index 1a97af3..7529d31 100644
1277 --- a/arch/ia64/include/asm/pgtable.h
1278 +++ b/arch/ia64/include/asm/pgtable.h
1279 @@ -12,7 +12,7 @@
1280 * David Mosberger-Tang <davidm@hpl.hp.com>
1281 */
1282
1283 -
1284 +#include <linux/const.h>
1285 #include <asm/mman.h>
1286 #include <asm/page.h>
1287 #include <asm/processor.h>
1288 @@ -143,6 +143,17 @@
1289 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1290 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1291 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1292 +
1293 +#ifdef CONFIG_PAX_PAGEEXEC
1294 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1295 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1296 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1297 +#else
1298 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1299 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1300 +# define PAGE_COPY_NOEXEC PAGE_COPY
1301 +#endif
1302 +
1303 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1304 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1305 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1306 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1307 index b77768d..e0795eb 100644
1308 --- a/arch/ia64/include/asm/spinlock.h
1309 +++ b/arch/ia64/include/asm/spinlock.h
1310 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1311 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1312
1313 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1314 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1315 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1316 }
1317
1318 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1319 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1320 index 449c8c0..432a3d2 100644
1321 --- a/arch/ia64/include/asm/uaccess.h
1322 +++ b/arch/ia64/include/asm/uaccess.h
1323 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1324 const void *__cu_from = (from); \
1325 long __cu_len = (n); \
1326 \
1327 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1328 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1329 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1330 __cu_len; \
1331 })
1332 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1333 long __cu_len = (n); \
1334 \
1335 __chk_user_ptr(__cu_from); \
1336 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1337 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1338 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1339 __cu_len; \
1340 })
1341 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1342 index 24603be..948052d 100644
1343 --- a/arch/ia64/kernel/module.c
1344 +++ b/arch/ia64/kernel/module.c
1345 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1346 void
1347 module_free (struct module *mod, void *module_region)
1348 {
1349 - if (mod && mod->arch.init_unw_table &&
1350 - module_region == mod->module_init) {
1351 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1352 unw_remove_unwind_table(mod->arch.init_unw_table);
1353 mod->arch.init_unw_table = NULL;
1354 }
1355 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1356 }
1357
1358 static inline int
1359 +in_init_rx (const struct module *mod, uint64_t addr)
1360 +{
1361 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1362 +}
1363 +
1364 +static inline int
1365 +in_init_rw (const struct module *mod, uint64_t addr)
1366 +{
1367 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1368 +}
1369 +
1370 +static inline int
1371 in_init (const struct module *mod, uint64_t addr)
1372 {
1373 - return addr - (uint64_t) mod->module_init < mod->init_size;
1374 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1375 +}
1376 +
1377 +static inline int
1378 +in_core_rx (const struct module *mod, uint64_t addr)
1379 +{
1380 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1381 +}
1382 +
1383 +static inline int
1384 +in_core_rw (const struct module *mod, uint64_t addr)
1385 +{
1386 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1387 }
1388
1389 static inline int
1390 in_core (const struct module *mod, uint64_t addr)
1391 {
1392 - return addr - (uint64_t) mod->module_core < mod->core_size;
1393 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1394 }
1395
1396 static inline int
1397 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1398 break;
1399
1400 case RV_BDREL:
1401 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1402 + if (in_init_rx(mod, val))
1403 + val -= (uint64_t) mod->module_init_rx;
1404 + else if (in_init_rw(mod, val))
1405 + val -= (uint64_t) mod->module_init_rw;
1406 + else if (in_core_rx(mod, val))
1407 + val -= (uint64_t) mod->module_core_rx;
1408 + else if (in_core_rw(mod, val))
1409 + val -= (uint64_t) mod->module_core_rw;
1410 break;
1411
1412 case RV_LTV:
1413 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1414 * addresses have been selected...
1415 */
1416 uint64_t gp;
1417 - if (mod->core_size > MAX_LTOFF)
1418 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1419 /*
1420 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1421 * at the end of the module.
1422 */
1423 - gp = mod->core_size - MAX_LTOFF / 2;
1424 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1425 else
1426 - gp = mod->core_size / 2;
1427 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1428 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1429 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1430 mod->arch.gp = gp;
1431 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1432 }
1433 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1434 index 609d500..7dde2a8 100644
1435 --- a/arch/ia64/kernel/sys_ia64.c
1436 +++ b/arch/ia64/kernel/sys_ia64.c
1437 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1438 if (REGION_NUMBER(addr) == RGN_HPAGE)
1439 addr = 0;
1440 #endif
1441 +
1442 +#ifdef CONFIG_PAX_RANDMMAP
1443 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1444 + addr = mm->free_area_cache;
1445 + else
1446 +#endif
1447 +
1448 if (!addr)
1449 addr = mm->free_area_cache;
1450
1451 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1453 /* At this point: (!vma || addr < vma->vm_end). */
1454 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1455 - if (start_addr != TASK_UNMAPPED_BASE) {
1456 + if (start_addr != mm->mmap_base) {
1457 /* Start a new search --- just in case we missed some holes. */
1458 - addr = TASK_UNMAPPED_BASE;
1459 + addr = mm->mmap_base;
1460 goto full_search;
1461 }
1462 return -ENOMEM;
1463 }
1464 - if (!vma || addr + len <= vma->vm_start) {
1465 + if (check_heap_stack_gap(vma, addr, len)) {
1466 /* Remember the address where we stopped this search: */
1467 mm->free_area_cache = addr + len;
1468 return addr;
1469 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1470 index 53c0ba0..2accdde 100644
1471 --- a/arch/ia64/kernel/vmlinux.lds.S
1472 +++ b/arch/ia64/kernel/vmlinux.lds.S
1473 @@ -199,7 +199,7 @@ SECTIONS {
1474 /* Per-cpu data: */
1475 . = ALIGN(PERCPU_PAGE_SIZE);
1476 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1477 - __phys_per_cpu_start = __per_cpu_load;
1478 + __phys_per_cpu_start = per_cpu_load;
1479 /*
1480 * ensure percpu data fits
1481 * into percpu page size
1482 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1483 index 20b3593..1ce77f0 100644
1484 --- a/arch/ia64/mm/fault.c
1485 +++ b/arch/ia64/mm/fault.c
1486 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1487 return pte_present(pte);
1488 }
1489
1490 +#ifdef CONFIG_PAX_PAGEEXEC
1491 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1492 +{
1493 + unsigned long i;
1494 +
1495 + printk(KERN_ERR "PAX: bytes at PC: ");
1496 + for (i = 0; i < 8; i++) {
1497 + unsigned int c;
1498 + if (get_user(c, (unsigned int *)pc+i))
1499 + printk(KERN_CONT "???????? ");
1500 + else
1501 + printk(KERN_CONT "%08x ", c);
1502 + }
1503 + printk("\n");
1504 +}
1505 +#endif
1506 +
1507 void __kprobes
1508 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1509 {
1510 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1511 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1512 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1513
1514 - if ((vma->vm_flags & mask) != mask)
1515 + if ((vma->vm_flags & mask) != mask) {
1516 +
1517 +#ifdef CONFIG_PAX_PAGEEXEC
1518 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1519 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1520 + goto bad_area;
1521 +
1522 + up_read(&mm->mmap_sem);
1523 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1524 + do_group_exit(SIGKILL);
1525 + }
1526 +#endif
1527 +
1528 goto bad_area;
1529
1530 + }
1531 +
1532 /*
1533 * If for any reason at all we couldn't handle the fault, make
1534 * sure we exit gracefully rather than endlessly redo the
1535 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1536 index 5ca674b..e0e1b70 100644
1537 --- a/arch/ia64/mm/hugetlbpage.c
1538 +++ b/arch/ia64/mm/hugetlbpage.c
1539 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1540 /* At this point: (!vmm || addr < vmm->vm_end). */
1541 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1542 return -ENOMEM;
1543 - if (!vmm || (addr + len) <= vmm->vm_start)
1544 + if (check_heap_stack_gap(vmm, addr, len))
1545 return addr;
1546 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1547 }
1548 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1549 index 00cb0e2..2ad8024 100644
1550 --- a/arch/ia64/mm/init.c
1551 +++ b/arch/ia64/mm/init.c
1552 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1553 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1554 vma->vm_end = vma->vm_start + PAGE_SIZE;
1555 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1556 +
1557 +#ifdef CONFIG_PAX_PAGEEXEC
1558 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1559 + vma->vm_flags &= ~VM_EXEC;
1560 +
1561 +#ifdef CONFIG_PAX_MPROTECT
1562 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1563 + vma->vm_flags &= ~VM_MAYEXEC;
1564 +#endif
1565 +
1566 + }
1567 +#endif
1568 +
1569 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1570 down_write(&current->mm->mmap_sem);
1571 if (insert_vm_struct(current->mm, vma)) {
1572 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1573 index 82abd15..d95ae5d 100644
1574 --- a/arch/m32r/lib/usercopy.c
1575 +++ b/arch/m32r/lib/usercopy.c
1576 @@ -14,6 +14,9 @@
1577 unsigned long
1578 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1579 {
1580 + if ((long)n < 0)
1581 + return n;
1582 +
1583 prefetch(from);
1584 if (access_ok(VERIFY_WRITE, to, n))
1585 __copy_user(to,from,n);
1586 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1587 unsigned long
1588 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1589 {
1590 + if ((long)n < 0)
1591 + return n;
1592 +
1593 prefetchw(to);
1594 if (access_ok(VERIFY_READ, from, n))
1595 __copy_user_zeroing(to,from,n);
1596 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
1597 index 1d93f81..67794d0 100644
1598 --- a/arch/mips/include/asm/atomic.h
1599 +++ b/arch/mips/include/asm/atomic.h
1600 @@ -21,6 +21,10 @@
1601 #include <asm/war.h>
1602 #include <asm/system.h>
1603
1604 +#ifdef CONFIG_GENERIC_ATOMIC64
1605 +#include <asm-generic/atomic64.h>
1606 +#endif
1607 +
1608 #define ATOMIC_INIT(i) { (i) }
1609
1610 /*
1611 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1612 */
1613 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
1614
1615 +#define atomic64_read_unchecked(v) atomic64_read(v)
1616 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1617 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1618 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1619 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1620 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1621 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1622 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1623 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1624 +
1625 #endif /* CONFIG_64BIT */
1626
1627 /*
1628 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1629 index 455c0ac..ad65fbe 100644
1630 --- a/arch/mips/include/asm/elf.h
1631 +++ b/arch/mips/include/asm/elf.h
1632 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1633 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1634 #endif
1635
1636 +#ifdef CONFIG_PAX_ASLR
1637 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1638 +
1639 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1640 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1641 +#endif
1642 +
1643 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1644 struct linux_binprm;
1645 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1646 int uses_interp);
1647
1648 -struct mm_struct;
1649 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1650 -#define arch_randomize_brk arch_randomize_brk
1651 -
1652 #endif /* _ASM_ELF_H */
1653 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1654 index e59cd1a..8e329d6 100644
1655 --- a/arch/mips/include/asm/page.h
1656 +++ b/arch/mips/include/asm/page.h
1657 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1658 #ifdef CONFIG_CPU_MIPS32
1659 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1660 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1661 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1662 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1663 #else
1664 typedef struct { unsigned long long pte; } pte_t;
1665 #define pte_val(x) ((x).pte)
1666 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1667 index 6018c80..7c37203 100644
1668 --- a/arch/mips/include/asm/system.h
1669 +++ b/arch/mips/include/asm/system.h
1670 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1671 */
1672 #define __ARCH_WANT_UNLOCKED_CTXSW
1673
1674 -extern unsigned long arch_align_stack(unsigned long sp);
1675 +#define arch_align_stack(x) ((x) & ~0xfUL)
1676
1677 #endif /* _ASM_SYSTEM_H */
1678 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1679 index 9fdd8bc..4bd7f1a 100644
1680 --- a/arch/mips/kernel/binfmt_elfn32.c
1681 +++ b/arch/mips/kernel/binfmt_elfn32.c
1682 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1683 #undef ELF_ET_DYN_BASE
1684 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1685
1686 +#ifdef CONFIG_PAX_ASLR
1687 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1688 +
1689 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1690 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1691 +#endif
1692 +
1693 #include <asm/processor.h>
1694 #include <linux/module.h>
1695 #include <linux/elfcore.h>
1696 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1697 index ff44823..97f8906 100644
1698 --- a/arch/mips/kernel/binfmt_elfo32.c
1699 +++ b/arch/mips/kernel/binfmt_elfo32.c
1700 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1701 #undef ELF_ET_DYN_BASE
1702 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1703
1704 +#ifdef CONFIG_PAX_ASLR
1705 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1706 +
1707 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1708 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1709 +#endif
1710 +
1711 #include <asm/processor.h>
1712
1713 /*
1714 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1715 index c47f96e..661d418 100644
1716 --- a/arch/mips/kernel/process.c
1717 +++ b/arch/mips/kernel/process.c
1718 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1719 out:
1720 return pc;
1721 }
1722 -
1723 -/*
1724 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1725 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1726 - */
1727 -unsigned long arch_align_stack(unsigned long sp)
1728 -{
1729 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1730 - sp -= get_random_int() & ~PAGE_MASK;
1731 -
1732 - return sp & ALMASK;
1733 -}
1734 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1735 index 937cf33..adb39bb 100644
1736 --- a/arch/mips/mm/fault.c
1737 +++ b/arch/mips/mm/fault.c
1738 @@ -28,6 +28,23 @@
1739 #include <asm/highmem.h> /* For VMALLOC_END */
1740 #include <linux/kdebug.h>
1741
1742 +#ifdef CONFIG_PAX_PAGEEXEC
1743 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1744 +{
1745 + unsigned long i;
1746 +
1747 + printk(KERN_ERR "PAX: bytes at PC: ");
1748 + for (i = 0; i < 5; i++) {
1749 + unsigned int c;
1750 + if (get_user(c, (unsigned int *)pc+i))
1751 + printk(KERN_CONT "???????? ");
1752 + else
1753 + printk(KERN_CONT "%08x ", c);
1754 + }
1755 + printk("\n");
1756 +}
1757 +#endif
1758 +
1759 /*
1760 * This routine handles page faults. It determines the address,
1761 * and the problem, and then passes it off to one of the appropriate
1762 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1763 index 302d779..7d35bf8 100644
1764 --- a/arch/mips/mm/mmap.c
1765 +++ b/arch/mips/mm/mmap.c
1766 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1767 do_color_align = 1;
1768
1769 /* requesting a specific address */
1770 +
1771 +#ifdef CONFIG_PAX_RANDMMAP
1772 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1773 +#endif
1774 +
1775 if (addr) {
1776 if (do_color_align)
1777 addr = COLOUR_ALIGN(addr, pgoff);
1778 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1779 addr = PAGE_ALIGN(addr);
1780
1781 vma = find_vma(mm, addr);
1782 - if (TASK_SIZE - len >= addr &&
1783 - (!vma || addr + len <= vma->vm_start))
1784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1785 return addr;
1786 }
1787
1788 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1789 /* At this point: (!vma || addr < vma->vm_end). */
1790 if (TASK_SIZE - len < addr)
1791 return -ENOMEM;
1792 - if (!vma || addr + len <= vma->vm_start)
1793 + if (check_heap_stack_gap(vma, addr, len))
1794 return addr;
1795 addr = vma->vm_end;
1796 if (do_color_align)
1797 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1798 /* make sure it can fit in the remaining address space */
1799 if (likely(addr > len)) {
1800 vma = find_vma(mm, addr - len);
1801 - if (!vma || addr <= vma->vm_start) {
1802 + if (check_heap_stack_gap(vmm, addr - len, len))
1803 /* cache the address as a hint for next time */
1804 return mm->free_area_cache = addr - len;
1805 }
1806 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1807 * return with success:
1808 */
1809 vma = find_vma(mm, addr);
1810 - if (likely(!vma || addr + len <= vma->vm_start)) {
1811 + if (check_heap_stack_gap(vmm, addr, len)) {
1812 /* cache the address as a hint for next time */
1813 return mm->free_area_cache = addr;
1814 }
1815 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1816 mm->unmap_area = arch_unmap_area_topdown;
1817 }
1818 }
1819 -
1820 -static inline unsigned long brk_rnd(void)
1821 -{
1822 - unsigned long rnd = get_random_int();
1823 -
1824 - rnd = rnd << PAGE_SHIFT;
1825 - /* 8MB for 32bit, 256MB for 64bit */
1826 - if (TASK_IS_32BIT_ADDR)
1827 - rnd = rnd & 0x7ffffful;
1828 - else
1829 - rnd = rnd & 0xffffffful;
1830 -
1831 - return rnd;
1832 -}
1833 -
1834 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1835 -{
1836 - unsigned long base = mm->brk;
1837 - unsigned long ret;
1838 -
1839 - ret = PAGE_ALIGN(base + brk_rnd());
1840 -
1841 - if (ret < mm->brk)
1842 - return mm->brk;
1843 -
1844 - return ret;
1845 -}
1846 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
1847 index 4054b31..a10c105 100644
1848 --- a/arch/parisc/include/asm/atomic.h
1849 +++ b/arch/parisc/include/asm/atomic.h
1850 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1851
1852 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
1853
1854 +#define atomic64_read_unchecked(v) atomic64_read(v)
1855 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1856 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1857 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1858 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1859 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1860 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1861 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1862 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1863 +
1864 #endif /* !CONFIG_64BIT */
1865
1866
1867 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1868 index 19f6cb1..6c78cf2 100644
1869 --- a/arch/parisc/include/asm/elf.h
1870 +++ b/arch/parisc/include/asm/elf.h
1871 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1872
1873 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1874
1875 +#ifdef CONFIG_PAX_ASLR
1876 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1877 +
1878 +#define PAX_DELTA_MMAP_LEN 16
1879 +#define PAX_DELTA_STACK_LEN 16
1880 +#endif
1881 +
1882 /* This yields a mask that user programs can use to figure out what
1883 instruction set this CPU supports. This could be done in user space,
1884 but it's not easy, and we've already done it here. */
1885 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1886 index 22dadeb..f6c2be4 100644
1887 --- a/arch/parisc/include/asm/pgtable.h
1888 +++ b/arch/parisc/include/asm/pgtable.h
1889 @@ -210,6 +210,17 @@ struct vm_area_struct;
1890 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1891 #define PAGE_COPY PAGE_EXECREAD
1892 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1893 +
1894 +#ifdef CONFIG_PAX_PAGEEXEC
1895 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1896 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1897 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1898 +#else
1899 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1900 +# define PAGE_COPY_NOEXEC PAGE_COPY
1901 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1902 +#endif
1903 +
1904 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1905 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1906 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1907 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1908 index 5e34ccf..672bc9c 100644
1909 --- a/arch/parisc/kernel/module.c
1910 +++ b/arch/parisc/kernel/module.c
1911 @@ -98,16 +98,38 @@
1912
1913 /* three functions to determine where in the module core
1914 * or init pieces the location is */
1915 +static inline int in_init_rx(struct module *me, void *loc)
1916 +{
1917 + return (loc >= me->module_init_rx &&
1918 + loc < (me->module_init_rx + me->init_size_rx));
1919 +}
1920 +
1921 +static inline int in_init_rw(struct module *me, void *loc)
1922 +{
1923 + return (loc >= me->module_init_rw &&
1924 + loc < (me->module_init_rw + me->init_size_rw));
1925 +}
1926 +
1927 static inline int in_init(struct module *me, void *loc)
1928 {
1929 - return (loc >= me->module_init &&
1930 - loc <= (me->module_init + me->init_size));
1931 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1932 +}
1933 +
1934 +static inline int in_core_rx(struct module *me, void *loc)
1935 +{
1936 + return (loc >= me->module_core_rx &&
1937 + loc < (me->module_core_rx + me->core_size_rx));
1938 +}
1939 +
1940 +static inline int in_core_rw(struct module *me, void *loc)
1941 +{
1942 + return (loc >= me->module_core_rw &&
1943 + loc < (me->module_core_rw + me->core_size_rw));
1944 }
1945
1946 static inline int in_core(struct module *me, void *loc)
1947 {
1948 - return (loc >= me->module_core &&
1949 - loc <= (me->module_core + me->core_size));
1950 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1951 }
1952
1953 static inline int in_local(struct module *me, void *loc)
1954 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1955 }
1956
1957 /* align things a bit */
1958 - me->core_size = ALIGN(me->core_size, 16);
1959 - me->arch.got_offset = me->core_size;
1960 - me->core_size += gots * sizeof(struct got_entry);
1961 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1962 + me->arch.got_offset = me->core_size_rw;
1963 + me->core_size_rw += gots * sizeof(struct got_entry);
1964
1965 - me->core_size = ALIGN(me->core_size, 16);
1966 - me->arch.fdesc_offset = me->core_size;
1967 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1968 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1969 + me->arch.fdesc_offset = me->core_size_rw;
1970 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1971
1972 me->arch.got_max = gots;
1973 me->arch.fdesc_max = fdescs;
1974 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1975
1976 BUG_ON(value == 0);
1977
1978 - got = me->module_core + me->arch.got_offset;
1979 + got = me->module_core_rw + me->arch.got_offset;
1980 for (i = 0; got[i].addr; i++)
1981 if (got[i].addr == value)
1982 goto out;
1983 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1984 #ifdef CONFIG_64BIT
1985 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1986 {
1987 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1988 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1989
1990 if (!value) {
1991 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1992 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1993
1994 /* Create new one */
1995 fdesc->addr = value;
1996 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1997 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1998 return (Elf_Addr)fdesc;
1999 }
2000 #endif /* CONFIG_64BIT */
2001 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
2002
2003 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2004 end = table + sechdrs[me->arch.unwind_section].sh_size;
2005 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2006 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2007
2008 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2009 me->arch.unwind_section, table, end, gp);
2010 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2011 index c9b9322..02d8940 100644
2012 --- a/arch/parisc/kernel/sys_parisc.c
2013 +++ b/arch/parisc/kernel/sys_parisc.c
2014 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2015 /* At this point: (!vma || addr < vma->vm_end). */
2016 if (TASK_SIZE - len < addr)
2017 return -ENOMEM;
2018 - if (!vma || addr + len <= vma->vm_start)
2019 + if (check_heap_stack_gap(vma, addr, len))
2020 return addr;
2021 addr = vma->vm_end;
2022 }
2023 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2024 /* At this point: (!vma || addr < vma->vm_end). */
2025 if (TASK_SIZE - len < addr)
2026 return -ENOMEM;
2027 - if (!vma || addr + len <= vma->vm_start)
2028 + if (check_heap_stack_gap(vma, addr, len))
2029 return addr;
2030 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2031 if (addr < vma->vm_end) /* handle wraparound */
2032 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2033 if (flags & MAP_FIXED)
2034 return addr;
2035 if (!addr)
2036 - addr = TASK_UNMAPPED_BASE;
2037 + addr = current->mm->mmap_base;
2038
2039 if (filp) {
2040 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2041 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2042 index f19e660..414fe24 100644
2043 --- a/arch/parisc/kernel/traps.c
2044 +++ b/arch/parisc/kernel/traps.c
2045 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2046
2047 down_read(&current->mm->mmap_sem);
2048 vma = find_vma(current->mm,regs->iaoq[0]);
2049 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2050 - && (vma->vm_flags & VM_EXEC)) {
2051 -
2052 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2053 fault_address = regs->iaoq[0];
2054 fault_space = regs->iasq[0];
2055
2056 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2057 index 18162ce..94de376 100644
2058 --- a/arch/parisc/mm/fault.c
2059 +++ b/arch/parisc/mm/fault.c
2060 @@ -15,6 +15,7 @@
2061 #include <linux/sched.h>
2062 #include <linux/interrupt.h>
2063 #include <linux/module.h>
2064 +#include <linux/unistd.h>
2065
2066 #include <asm/uaccess.h>
2067 #include <asm/traps.h>
2068 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2069 static unsigned long
2070 parisc_acctyp(unsigned long code, unsigned int inst)
2071 {
2072 - if (code == 6 || code == 16)
2073 + if (code == 6 || code == 7 || code == 16)
2074 return VM_EXEC;
2075
2076 switch (inst & 0xf0000000) {
2077 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2078 }
2079 #endif
2080
2081 +#ifdef CONFIG_PAX_PAGEEXEC
2082 +/*
2083 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2084 + *
2085 + * returns 1 when task should be killed
2086 + * 2 when rt_sigreturn trampoline was detected
2087 + * 3 when unpatched PLT trampoline was detected
2088 + */
2089 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2090 +{
2091 +
2092 +#ifdef CONFIG_PAX_EMUPLT
2093 + int err;
2094 +
2095 + do { /* PaX: unpatched PLT emulation */
2096 + unsigned int bl, depwi;
2097 +
2098 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2099 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2100 +
2101 + if (err)
2102 + break;
2103 +
2104 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2105 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2106 +
2107 + err = get_user(ldw, (unsigned int *)addr);
2108 + err |= get_user(bv, (unsigned int *)(addr+4));
2109 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2110 +
2111 + if (err)
2112 + break;
2113 +
2114 + if (ldw == 0x0E801096U &&
2115 + bv == 0xEAC0C000U &&
2116 + ldw2 == 0x0E881095U)
2117 + {
2118 + unsigned int resolver, map;
2119 +
2120 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2121 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2122 + if (err)
2123 + break;
2124 +
2125 + regs->gr[20] = instruction_pointer(regs)+8;
2126 + regs->gr[21] = map;
2127 + regs->gr[22] = resolver;
2128 + regs->iaoq[0] = resolver | 3UL;
2129 + regs->iaoq[1] = regs->iaoq[0] + 4;
2130 + return 3;
2131 + }
2132 + }
2133 + } while (0);
2134 +#endif
2135 +
2136 +#ifdef CONFIG_PAX_EMUTRAMP
2137 +
2138 +#ifndef CONFIG_PAX_EMUSIGRT
2139 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2140 + return 1;
2141 +#endif
2142 +
2143 + do { /* PaX: rt_sigreturn emulation */
2144 + unsigned int ldi1, ldi2, bel, nop;
2145 +
2146 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2147 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2148 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2149 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2150 +
2151 + if (err)
2152 + break;
2153 +
2154 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2155 + ldi2 == 0x3414015AU &&
2156 + bel == 0xE4008200U &&
2157 + nop == 0x08000240U)
2158 + {
2159 + regs->gr[25] = (ldi1 & 2) >> 1;
2160 + regs->gr[20] = __NR_rt_sigreturn;
2161 + regs->gr[31] = regs->iaoq[1] + 16;
2162 + regs->sr[0] = regs->iasq[1];
2163 + regs->iaoq[0] = 0x100UL;
2164 + regs->iaoq[1] = regs->iaoq[0] + 4;
2165 + regs->iasq[0] = regs->sr[2];
2166 + regs->iasq[1] = regs->sr[2];
2167 + return 2;
2168 + }
2169 + } while (0);
2170 +#endif
2171 +
2172 + return 1;
2173 +}
2174 +
2175 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2176 +{
2177 + unsigned long i;
2178 +
2179 + printk(KERN_ERR "PAX: bytes at PC: ");
2180 + for (i = 0; i < 5; i++) {
2181 + unsigned int c;
2182 + if (get_user(c, (unsigned int *)pc+i))
2183 + printk(KERN_CONT "???????? ");
2184 + else
2185 + printk(KERN_CONT "%08x ", c);
2186 + }
2187 + printk("\n");
2188 +}
2189 +#endif
2190 +
2191 int fixup_exception(struct pt_regs *regs)
2192 {
2193 const struct exception_table_entry *fix;
2194 @@ -192,8 +303,33 @@ good_area:
2195
2196 acc_type = parisc_acctyp(code,regs->iir);
2197
2198 - if ((vma->vm_flags & acc_type) != acc_type)
2199 + if ((vma->vm_flags & acc_type) != acc_type) {
2200 +
2201 +#ifdef CONFIG_PAX_PAGEEXEC
2202 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2203 + (address & ~3UL) == instruction_pointer(regs))
2204 + {
2205 + up_read(&mm->mmap_sem);
2206 + switch (pax_handle_fetch_fault(regs)) {
2207 +
2208 +#ifdef CONFIG_PAX_EMUPLT
2209 + case 3:
2210 + return;
2211 +#endif
2212 +
2213 +#ifdef CONFIG_PAX_EMUTRAMP
2214 + case 2:
2215 + return;
2216 +#endif
2217 +
2218 + }
2219 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2220 + do_group_exit(SIGKILL);
2221 + }
2222 +#endif
2223 +
2224 goto bad_area;
2225 + }
2226
2227 /*
2228 * If for any reason at all we couldn't handle the fault, make
2229 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
2230 index 02e41b5..ec6e26c 100644
2231 --- a/arch/powerpc/include/asm/atomic.h
2232 +++ b/arch/powerpc/include/asm/atomic.h
2233 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2234
2235 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2236
2237 +#define atomic64_read_unchecked(v) atomic64_read(v)
2238 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2239 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2240 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2241 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2242 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2243 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2244 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2245 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2246 +
2247 #endif /* __powerpc64__ */
2248
2249 #endif /* __KERNEL__ */
2250 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2251 index 3bf9cca..e7457d0 100644
2252 --- a/arch/powerpc/include/asm/elf.h
2253 +++ b/arch/powerpc/include/asm/elf.h
2254 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2255 the loader. We need to make sure that it is out of the way of the program
2256 that it will "exec", and that there is sufficient room for the brk. */
2257
2258 -extern unsigned long randomize_et_dyn(unsigned long base);
2259 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2260 +#define ELF_ET_DYN_BASE (0x20000000)
2261 +
2262 +#ifdef CONFIG_PAX_ASLR
2263 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2264 +
2265 +#ifdef __powerpc64__
2266 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2267 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2268 +#else
2269 +#define PAX_DELTA_MMAP_LEN 15
2270 +#define PAX_DELTA_STACK_LEN 15
2271 +#endif
2272 +#endif
2273
2274 /*
2275 * Our registers are always unsigned longs, whether we're a 32 bit
2276 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2277 (0x7ff >> (PAGE_SHIFT - 12)) : \
2278 (0x3ffff >> (PAGE_SHIFT - 12)))
2279
2280 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2281 -#define arch_randomize_brk arch_randomize_brk
2282 -
2283 #endif /* __KERNEL__ */
2284
2285 /*
2286 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2287 index bca8fdc..61e9580 100644
2288 --- a/arch/powerpc/include/asm/kmap_types.h
2289 +++ b/arch/powerpc/include/asm/kmap_types.h
2290 @@ -27,6 +27,7 @@ enum km_type {
2291 KM_PPC_SYNC_PAGE,
2292 KM_PPC_SYNC_ICACHE,
2293 KM_KDB,
2294 + KM_CLEARPAGE,
2295 KM_TYPE_NR
2296 };
2297
2298 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2299 index d4a7f64..451de1c 100644
2300 --- a/arch/powerpc/include/asm/mman.h
2301 +++ b/arch/powerpc/include/asm/mman.h
2302 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2303 }
2304 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2305
2306 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2307 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2308 {
2309 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2310 }
2311 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2312 index dd9c4fd..a2ced87 100644
2313 --- a/arch/powerpc/include/asm/page.h
2314 +++ b/arch/powerpc/include/asm/page.h
2315 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2316 * and needs to be executable. This means the whole heap ends
2317 * up being executable.
2318 */
2319 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2320 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2321 +#define VM_DATA_DEFAULT_FLAGS32 \
2322 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2323 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2324
2325 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2326 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2327 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2328 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2329 #endif
2330
2331 +#define ktla_ktva(addr) (addr)
2332 +#define ktva_ktla(addr) (addr)
2333 +
2334 /*
2335 * Use the top bit of the higher-level page table entries to indicate whether
2336 * the entries we point to contain hugepages. This works because we know that
2337 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2338 index fb40ede..d3ce956 100644
2339 --- a/arch/powerpc/include/asm/page_64.h
2340 +++ b/arch/powerpc/include/asm/page_64.h
2341 @@ -144,15 +144,18 @@ do { \
2342 * stack by default, so in the absence of a PT_GNU_STACK program header
2343 * we turn execute permission off.
2344 */
2345 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2346 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2347 +#define VM_STACK_DEFAULT_FLAGS32 \
2348 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2349 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2350
2351 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2352 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2353
2354 +#ifndef CONFIG_PAX_PAGEEXEC
2355 #define VM_STACK_DEFAULT_FLAGS \
2356 (is_32bit_task() ? \
2357 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2358 +#endif
2359
2360 #include <asm-generic/getorder.h>
2361
2362 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2363 index 88b0bd9..e32bc67 100644
2364 --- a/arch/powerpc/include/asm/pgtable.h
2365 +++ b/arch/powerpc/include/asm/pgtable.h
2366 @@ -2,6 +2,7 @@
2367 #define _ASM_POWERPC_PGTABLE_H
2368 #ifdef __KERNEL__
2369
2370 +#include <linux/const.h>
2371 #ifndef __ASSEMBLY__
2372 #include <asm/processor.h> /* For TASK_SIZE */
2373 #include <asm/mmu.h>
2374 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2375 index 4aad413..85d86bf 100644
2376 --- a/arch/powerpc/include/asm/pte-hash32.h
2377 +++ b/arch/powerpc/include/asm/pte-hash32.h
2378 @@ -21,6 +21,7 @@
2379 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2380 #define _PAGE_USER 0x004 /* usermode access allowed */
2381 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2382 +#define _PAGE_EXEC _PAGE_GUARDED
2383 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2384 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2385 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2386 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2387 index 559da19..7e5835c 100644
2388 --- a/arch/powerpc/include/asm/reg.h
2389 +++ b/arch/powerpc/include/asm/reg.h
2390 @@ -212,6 +212,7 @@
2391 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2392 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2393 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2394 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2395 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2396 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2397 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2398 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2399 index e30a13d..2b7d994 100644
2400 --- a/arch/powerpc/include/asm/system.h
2401 +++ b/arch/powerpc/include/asm/system.h
2402 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2403 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2404 #endif
2405
2406 -extern unsigned long arch_align_stack(unsigned long sp);
2407 +#define arch_align_stack(x) ((x) & ~0xfUL)
2408
2409 /* Used in very early kernel initialization. */
2410 extern unsigned long reloc_offset(void);
2411 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2412 index bd0fb84..a42a14b 100644
2413 --- a/arch/powerpc/include/asm/uaccess.h
2414 +++ b/arch/powerpc/include/asm/uaccess.h
2415 @@ -13,6 +13,8 @@
2416 #define VERIFY_READ 0
2417 #define VERIFY_WRITE 1
2418
2419 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2420 +
2421 /*
2422 * The fs value determines whether argument validity checking should be
2423 * performed or not. If get_fs() == USER_DS, checking is performed, with
2424 @@ -327,52 +329,6 @@ do { \
2425 extern unsigned long __copy_tofrom_user(void __user *to,
2426 const void __user *from, unsigned long size);
2427
2428 -#ifndef __powerpc64__
2429 -
2430 -static inline unsigned long copy_from_user(void *to,
2431 - const void __user *from, unsigned long n)
2432 -{
2433 - unsigned long over;
2434 -
2435 - if (access_ok(VERIFY_READ, from, n))
2436 - return __copy_tofrom_user((__force void __user *)to, from, n);
2437 - if ((unsigned long)from < TASK_SIZE) {
2438 - over = (unsigned long)from + n - TASK_SIZE;
2439 - return __copy_tofrom_user((__force void __user *)to, from,
2440 - n - over) + over;
2441 - }
2442 - return n;
2443 -}
2444 -
2445 -static inline unsigned long copy_to_user(void __user *to,
2446 - const void *from, unsigned long n)
2447 -{
2448 - unsigned long over;
2449 -
2450 - if (access_ok(VERIFY_WRITE, to, n))
2451 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2452 - if ((unsigned long)to < TASK_SIZE) {
2453 - over = (unsigned long)to + n - TASK_SIZE;
2454 - return __copy_tofrom_user(to, (__force void __user *)from,
2455 - n - over) + over;
2456 - }
2457 - return n;
2458 -}
2459 -
2460 -#else /* __powerpc64__ */
2461 -
2462 -#define __copy_in_user(to, from, size) \
2463 - __copy_tofrom_user((to), (from), (size))
2464 -
2465 -extern unsigned long copy_from_user(void *to, const void __user *from,
2466 - unsigned long n);
2467 -extern unsigned long copy_to_user(void __user *to, const void *from,
2468 - unsigned long n);
2469 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2470 - unsigned long n);
2471 -
2472 -#endif /* __powerpc64__ */
2473 -
2474 static inline unsigned long __copy_from_user_inatomic(void *to,
2475 const void __user *from, unsigned long n)
2476 {
2477 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2478 if (ret == 0)
2479 return 0;
2480 }
2481 +
2482 + if (!__builtin_constant_p(n))
2483 + check_object_size(to, n, false);
2484 +
2485 return __copy_tofrom_user((__force void __user *)to, from, n);
2486 }
2487
2488 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2489 if (ret == 0)
2490 return 0;
2491 }
2492 +
2493 + if (!__builtin_constant_p(n))
2494 + check_object_size(from, n, true);
2495 +
2496 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2497 }
2498
2499 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2500 return __copy_to_user_inatomic(to, from, size);
2501 }
2502
2503 +#ifndef __powerpc64__
2504 +
2505 +static inline unsigned long __must_check copy_from_user(void *to,
2506 + const void __user *from, unsigned long n)
2507 +{
2508 + unsigned long over;
2509 +
2510 + if ((long)n < 0)
2511 + return n;
2512 +
2513 + if (access_ok(VERIFY_READ, from, n)) {
2514 + if (!__builtin_constant_p(n))
2515 + check_object_size(to, n, false);
2516 + return __copy_tofrom_user((__force void __user *)to, from, n);
2517 + }
2518 + if ((unsigned long)from < TASK_SIZE) {
2519 + over = (unsigned long)from + n - TASK_SIZE;
2520 + if (!__builtin_constant_p(n - over))
2521 + check_object_size(to, n - over, false);
2522 + return __copy_tofrom_user((__force void __user *)to, from,
2523 + n - over) + over;
2524 + }
2525 + return n;
2526 +}
2527 +
2528 +static inline unsigned long __must_check copy_to_user(void __user *to,
2529 + const void *from, unsigned long n)
2530 +{
2531 + unsigned long over;
2532 +
2533 + if ((long)n < 0)
2534 + return n;
2535 +
2536 + if (access_ok(VERIFY_WRITE, to, n)) {
2537 + if (!__builtin_constant_p(n))
2538 + check_object_size(from, n, true);
2539 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2540 + }
2541 + if ((unsigned long)to < TASK_SIZE) {
2542 + over = (unsigned long)to + n - TASK_SIZE;
2543 + if (!__builtin_constant_p(n))
2544 + check_object_size(from, n - over, true);
2545 + return __copy_tofrom_user(to, (__force void __user *)from,
2546 + n - over) + over;
2547 + }
2548 + return n;
2549 +}
2550 +
2551 +#else /* __powerpc64__ */
2552 +
2553 +#define __copy_in_user(to, from, size) \
2554 + __copy_tofrom_user((to), (from), (size))
2555 +
2556 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2557 +{
2558 + if ((long)n < 0 || n > INT_MAX)
2559 + return n;
2560 +
2561 + if (!__builtin_constant_p(n))
2562 + check_object_size(to, n, false);
2563 +
2564 + if (likely(access_ok(VERIFY_READ, from, n)))
2565 + n = __copy_from_user(to, from, n);
2566 + else
2567 + memset(to, 0, n);
2568 + return n;
2569 +}
2570 +
2571 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2572 +{
2573 + if ((long)n < 0 || n > INT_MAX)
2574 + return n;
2575 +
2576 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2577 + if (!__builtin_constant_p(n))
2578 + check_object_size(from, n, true);
2579 + n = __copy_to_user(to, from, n);
2580 + }
2581 + return n;
2582 +}
2583 +
2584 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2585 + unsigned long n);
2586 +
2587 +#endif /* __powerpc64__ */
2588 +
2589 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2590
2591 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2592 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2593 index 429983c..7af363b 100644
2594 --- a/arch/powerpc/kernel/exceptions-64e.S
2595 +++ b/arch/powerpc/kernel/exceptions-64e.S
2596 @@ -587,6 +587,7 @@ storage_fault_common:
2597 std r14,_DAR(r1)
2598 std r15,_DSISR(r1)
2599 addi r3,r1,STACK_FRAME_OVERHEAD
2600 + bl .save_nvgprs
2601 mr r4,r14
2602 mr r5,r15
2603 ld r14,PACA_EXGEN+EX_R14(r13)
2604 @@ -596,8 +597,7 @@ storage_fault_common:
2605 cmpdi r3,0
2606 bne- 1f
2607 b .ret_from_except_lite
2608 -1: bl .save_nvgprs
2609 - mr r5,r3
2610 +1: mr r5,r3
2611 addi r3,r1,STACK_FRAME_OVERHEAD
2612 ld r4,_DAR(r1)
2613 bl .bad_page_fault
2614 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2615 index cf9c69b..ebc9640 100644
2616 --- a/arch/powerpc/kernel/exceptions-64s.S
2617 +++ b/arch/powerpc/kernel/exceptions-64s.S
2618 @@ -1004,10 +1004,10 @@ handle_page_fault:
2619 11: ld r4,_DAR(r1)
2620 ld r5,_DSISR(r1)
2621 addi r3,r1,STACK_FRAME_OVERHEAD
2622 + bl .save_nvgprs
2623 bl .do_page_fault
2624 cmpdi r3,0
2625 beq+ 13f
2626 - bl .save_nvgprs
2627 mr r5,r3
2628 addi r3,r1,STACK_FRAME_OVERHEAD
2629 lwz r4,_DAR(r1)
2630 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2631 index 0b6d796..d760ddb 100644
2632 --- a/arch/powerpc/kernel/module_32.c
2633 +++ b/arch/powerpc/kernel/module_32.c
2634 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2635 me->arch.core_plt_section = i;
2636 }
2637 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2638 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2639 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2640 return -ENOEXEC;
2641 }
2642
2643 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2644
2645 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2646 /* Init, or core PLT? */
2647 - if (location >= mod->module_core
2648 - && location < mod->module_core + mod->core_size)
2649 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2650 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2651 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2652 - else
2653 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2654 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2655 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2656 + else {
2657 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2658 + return ~0UL;
2659 + }
2660
2661 /* Find this entry, or if that fails, the next avail. entry */
2662 while (entry->jump[0]) {
2663 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2664 index 6457574..08b28d3 100644
2665 --- a/arch/powerpc/kernel/process.c
2666 +++ b/arch/powerpc/kernel/process.c
2667 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2668 * Lookup NIP late so we have the best change of getting the
2669 * above info out without failing
2670 */
2671 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2672 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2673 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2674 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2675 #endif
2676 show_stack(current, (unsigned long *) regs->gpr[1]);
2677 if (!user_mode(regs))
2678 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2679 newsp = stack[0];
2680 ip = stack[STACK_FRAME_LR_SAVE];
2681 if (!firstframe || ip != lr) {
2682 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2683 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2684 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2685 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2686 - printk(" (%pS)",
2687 + printk(" (%pA)",
2688 (void *)current->ret_stack[curr_frame].ret);
2689 curr_frame--;
2690 }
2691 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2692 struct pt_regs *regs = (struct pt_regs *)
2693 (sp + STACK_FRAME_OVERHEAD);
2694 lr = regs->link;
2695 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2696 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2697 regs->trap, (void *)regs->nip, (void *)lr);
2698 firstframe = 1;
2699 }
2700 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2701 }
2702
2703 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2704 -
2705 -unsigned long arch_align_stack(unsigned long sp)
2706 -{
2707 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2708 - sp -= get_random_int() & ~PAGE_MASK;
2709 - return sp & ~0xf;
2710 -}
2711 -
2712 -static inline unsigned long brk_rnd(void)
2713 -{
2714 - unsigned long rnd = 0;
2715 -
2716 - /* 8MB for 32bit, 1GB for 64bit */
2717 - if (is_32bit_task())
2718 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2719 - else
2720 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2721 -
2722 - return rnd << PAGE_SHIFT;
2723 -}
2724 -
2725 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2726 -{
2727 - unsigned long base = mm->brk;
2728 - unsigned long ret;
2729 -
2730 -#ifdef CONFIG_PPC_STD_MMU_64
2731 - /*
2732 - * If we are using 1TB segments and we are allowed to randomise
2733 - * the heap, we can put it above 1TB so it is backed by a 1TB
2734 - * segment. Otherwise the heap will be in the bottom 1TB
2735 - * which always uses 256MB segments and this may result in a
2736 - * performance penalty.
2737 - */
2738 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2739 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2740 -#endif
2741 -
2742 - ret = PAGE_ALIGN(base + brk_rnd());
2743 -
2744 - if (ret < mm->brk)
2745 - return mm->brk;
2746 -
2747 - return ret;
2748 -}
2749 -
2750 -unsigned long randomize_et_dyn(unsigned long base)
2751 -{
2752 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2753 -
2754 - if (ret < base)
2755 - return base;
2756 -
2757 - return ret;
2758 -}
2759 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2760 index 836a5a1..27289a3 100644
2761 --- a/arch/powerpc/kernel/signal_32.c
2762 +++ b/arch/powerpc/kernel/signal_32.c
2763 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2764 /* Save user registers on the stack */
2765 frame = &rt_sf->uc.uc_mcontext;
2766 addr = frame;
2767 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2768 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2769 if (save_user_regs(regs, frame, 0, 1))
2770 goto badframe;
2771 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2772 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2773 index a50b5ec..547078a 100644
2774 --- a/arch/powerpc/kernel/signal_64.c
2775 +++ b/arch/powerpc/kernel/signal_64.c
2776 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2777 current->thread.fpscr.val = 0;
2778
2779 /* Set up to return from userspace. */
2780 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2781 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2782 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2783 } else {
2784 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2785 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2786 index 5459d14..10f8070 100644
2787 --- a/arch/powerpc/kernel/traps.c
2788 +++ b/arch/powerpc/kernel/traps.c
2789 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2790 static inline void pmac_backlight_unblank(void) { }
2791 #endif
2792
2793 +extern void gr_handle_kernel_exploit(void);
2794 +
2795 int die(const char *str, struct pt_regs *regs, long err)
2796 {
2797 static struct {
2798 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2799 if (panic_on_oops)
2800 panic("Fatal exception");
2801
2802 + gr_handle_kernel_exploit();
2803 +
2804 oops_exit();
2805 do_exit(err);
2806
2807 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2808 index 7d14bb6..1305601 100644
2809 --- a/arch/powerpc/kernel/vdso.c
2810 +++ b/arch/powerpc/kernel/vdso.c
2811 @@ -35,6 +35,7 @@
2812 #include <asm/firmware.h>
2813 #include <asm/vdso.h>
2814 #include <asm/vdso_datapage.h>
2815 +#include <asm/mman.h>
2816
2817 #include "setup.h"
2818
2819 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2820 vdso_base = VDSO32_MBASE;
2821 #endif
2822
2823 - current->mm->context.vdso_base = 0;
2824 + current->mm->context.vdso_base = ~0UL;
2825
2826 /* vDSO has a problem and was disabled, just don't "enable" it for the
2827 * process
2828 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2829 vdso_base = get_unmapped_area(NULL, vdso_base,
2830 (vdso_pages << PAGE_SHIFT) +
2831 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2832 - 0, 0);
2833 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2834 if (IS_ERR_VALUE(vdso_base)) {
2835 rc = vdso_base;
2836 goto fail_mmapsem;
2837 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2838 index 5eea6f3..5d10396 100644
2839 --- a/arch/powerpc/lib/usercopy_64.c
2840 +++ b/arch/powerpc/lib/usercopy_64.c
2841 @@ -9,22 +9,6 @@
2842 #include <linux/module.h>
2843 #include <asm/uaccess.h>
2844
2845 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2846 -{
2847 - if (likely(access_ok(VERIFY_READ, from, n)))
2848 - n = __copy_from_user(to, from, n);
2849 - else
2850 - memset(to, 0, n);
2851 - return n;
2852 -}
2853 -
2854 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2855 -{
2856 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2857 - n = __copy_to_user(to, from, n);
2858 - return n;
2859 -}
2860 -
2861 unsigned long copy_in_user(void __user *to, const void __user *from,
2862 unsigned long n)
2863 {
2864 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2865 return n;
2866 }
2867
2868 -EXPORT_SYMBOL(copy_from_user);
2869 -EXPORT_SYMBOL(copy_to_user);
2870 EXPORT_SYMBOL(copy_in_user);
2871
2872 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2873 index 5efe8c9..db9ceef 100644
2874 --- a/arch/powerpc/mm/fault.c
2875 +++ b/arch/powerpc/mm/fault.c
2876 @@ -32,6 +32,10 @@
2877 #include <linux/perf_event.h>
2878 #include <linux/magic.h>
2879 #include <linux/ratelimit.h>
2880 +#include <linux/slab.h>
2881 +#include <linux/pagemap.h>
2882 +#include <linux/compiler.h>
2883 +#include <linux/unistd.h>
2884
2885 #include <asm/firmware.h>
2886 #include <asm/page.h>
2887 @@ -43,6 +47,7 @@
2888 #include <asm/tlbflush.h>
2889 #include <asm/siginfo.h>
2890 #include <mm/mmu_decl.h>
2891 +#include <asm/ptrace.h>
2892
2893 #ifdef CONFIG_KPROBES
2894 static inline int notify_page_fault(struct pt_regs *regs)
2895 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2896 }
2897 #endif
2898
2899 +#ifdef CONFIG_PAX_PAGEEXEC
2900 +/*
2901 + * PaX: decide what to do with offenders (regs->nip = fault address)
2902 + *
2903 + * returns 1 when task should be killed
2904 + */
2905 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2906 +{
2907 + return 1;
2908 +}
2909 +
2910 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2911 +{
2912 + unsigned long i;
2913 +
2914 + printk(KERN_ERR "PAX: bytes at PC: ");
2915 + for (i = 0; i < 5; i++) {
2916 + unsigned int c;
2917 + if (get_user(c, (unsigned int __user *)pc+i))
2918 + printk(KERN_CONT "???????? ");
2919 + else
2920 + printk(KERN_CONT "%08x ", c);
2921 + }
2922 + printk("\n");
2923 +}
2924 +#endif
2925 +
2926 /*
2927 * Check whether the instruction at regs->nip is a store using
2928 * an update addressing form which will update r1.
2929 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2930 * indicate errors in DSISR but can validly be set in SRR1.
2931 */
2932 if (trap == 0x400)
2933 - error_code &= 0x48200000;
2934 + error_code &= 0x58200000;
2935 else
2936 is_write = error_code & DSISR_ISSTORE;
2937 #else
2938 @@ -259,7 +291,7 @@ good_area:
2939 * "undefined". Of those that can be set, this is the only
2940 * one which seems bad.
2941 */
2942 - if (error_code & 0x10000000)
2943 + if (error_code & DSISR_GUARDED)
2944 /* Guarded storage error. */
2945 goto bad_area;
2946 #endif /* CONFIG_8xx */
2947 @@ -274,7 +306,7 @@ good_area:
2948 * processors use the same I/D cache coherency mechanism
2949 * as embedded.
2950 */
2951 - if (error_code & DSISR_PROTFAULT)
2952 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2953 goto bad_area;
2954 #endif /* CONFIG_PPC_STD_MMU */
2955
2956 @@ -343,6 +375,23 @@ bad_area:
2957 bad_area_nosemaphore:
2958 /* User mode accesses cause a SIGSEGV */
2959 if (user_mode(regs)) {
2960 +
2961 +#ifdef CONFIG_PAX_PAGEEXEC
2962 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2963 +#ifdef CONFIG_PPC_STD_MMU
2964 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2965 +#else
2966 + if (is_exec && regs->nip == address) {
2967 +#endif
2968 + switch (pax_handle_fetch_fault(regs)) {
2969 + }
2970 +
2971 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2972 + do_group_exit(SIGKILL);
2973 + }
2974 + }
2975 +#endif
2976 +
2977 _exception(SIGSEGV, regs, code, address);
2978 return 0;
2979 }
2980 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2981 index 5a783d8..c23e14b 100644
2982 --- a/arch/powerpc/mm/mmap_64.c
2983 +++ b/arch/powerpc/mm/mmap_64.c
2984 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2985 */
2986 if (mmap_is_legacy()) {
2987 mm->mmap_base = TASK_UNMAPPED_BASE;
2988 +
2989 +#ifdef CONFIG_PAX_RANDMMAP
2990 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2991 + mm->mmap_base += mm->delta_mmap;
2992 +#endif
2993 +
2994 mm->get_unmapped_area = arch_get_unmapped_area;
2995 mm->unmap_area = arch_unmap_area;
2996 } else {
2997 mm->mmap_base = mmap_base();
2998 +
2999 +#ifdef CONFIG_PAX_RANDMMAP
3000 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3001 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3002 +#endif
3003 +
3004 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3005 mm->unmap_area = arch_unmap_area_topdown;
3006 }
3007 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3008 index 73709f7..6b90313 100644
3009 --- a/arch/powerpc/mm/slice.c
3010 +++ b/arch/powerpc/mm/slice.c
3011 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3012 if ((mm->task_size - len) < addr)
3013 return 0;
3014 vma = find_vma(mm, addr);
3015 - return (!vma || (addr + len) <= vma->vm_start);
3016 + return check_heap_stack_gap(vma, addr, len);
3017 }
3018
3019 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3020 @@ -256,7 +256,7 @@ full_search:
3021 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3022 continue;
3023 }
3024 - if (!vma || addr + len <= vma->vm_start) {
3025 + if (check_heap_stack_gap(vma, addr, len)) {
3026 /*
3027 * Remember the place where we stopped the search:
3028 */
3029 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3030 }
3031 }
3032
3033 - addr = mm->mmap_base;
3034 - while (addr > len) {
3035 + if (mm->mmap_base < len)
3036 + addr = -ENOMEM;
3037 + else
3038 + addr = mm->mmap_base - len;
3039 +
3040 + while (!IS_ERR_VALUE(addr)) {
3041 /* Go down by chunk size */
3042 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3043 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3044
3045 /* Check for hit with different page size */
3046 mask = slice_range_to_mask(addr, len);
3047 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3048 * return with success:
3049 */
3050 vma = find_vma(mm, addr);
3051 - if (!vma || (addr + len) <= vma->vm_start) {
3052 + if (check_heap_stack_gap(vma, addr, len)) {
3053 /* remember the address as a hint for next time */
3054 if (use_cache)
3055 mm->free_area_cache = addr;
3056 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3057 mm->cached_hole_size = vma->vm_start - addr;
3058
3059 /* try just below the current vma->vm_start */
3060 - addr = vma->vm_start;
3061 + addr = skip_heap_stack_gap(vma, len);
3062 }
3063
3064 /*
3065 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3066 if (fixed && addr > (mm->task_size - len))
3067 return -EINVAL;
3068
3069 +#ifdef CONFIG_PAX_RANDMMAP
3070 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3071 + addr = 0;
3072 +#endif
3073 +
3074 /* If hint, make sure it matches our alignment restrictions */
3075 if (!fixed && addr) {
3076 addr = _ALIGN_UP(addr, 1ul << pshift);
3077 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3078 index 8517d2a..d2738d4 100644
3079 --- a/arch/s390/include/asm/atomic.h
3080 +++ b/arch/s390/include/asm/atomic.h
3081 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
3082 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
3083 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3084
3085 +#define atomic64_read_unchecked(v) atomic64_read(v)
3086 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3087 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3088 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3089 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3090 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3091 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3092 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3093 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3094 +
3095 #define smp_mb__before_atomic_dec() smp_mb()
3096 #define smp_mb__after_atomic_dec() smp_mb()
3097 #define smp_mb__before_atomic_inc() smp_mb()
3098 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3099 index 547f1a6..0b22b53 100644
3100 --- a/arch/s390/include/asm/elf.h
3101 +++ b/arch/s390/include/asm/elf.h
3102 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
3103 the loader. We need to make sure that it is out of the way of the program
3104 that it will "exec", and that there is sufficient room for the brk. */
3105
3106 -extern unsigned long randomize_et_dyn(unsigned long base);
3107 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
3108 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3109 +
3110 +#ifdef CONFIG_PAX_ASLR
3111 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3112 +
3113 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3114 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3115 +#endif
3116
3117 /* This yields a mask that user programs can use to figure out what
3118 instruction set this CPU supports. */
3119 @@ -211,7 +217,4 @@ struct linux_binprm;
3120 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3121 int arch_setup_additional_pages(struct linux_binprm *, int);
3122
3123 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3124 -#define arch_randomize_brk arch_randomize_brk
3125 -
3126 #endif
3127 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
3128 index ef573c1..75a1ce6 100644
3129 --- a/arch/s390/include/asm/system.h
3130 +++ b/arch/s390/include/asm/system.h
3131 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
3132 extern void (*_machine_halt)(void);
3133 extern void (*_machine_power_off)(void);
3134
3135 -extern unsigned long arch_align_stack(unsigned long sp);
3136 +#define arch_align_stack(x) ((x) & ~0xfUL)
3137
3138 static inline int tprot(unsigned long addr)
3139 {
3140 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3141 index 2b23885..e136e31 100644
3142 --- a/arch/s390/include/asm/uaccess.h
3143 +++ b/arch/s390/include/asm/uaccess.h
3144 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
3145 copy_to_user(void __user *to, const void *from, unsigned long n)
3146 {
3147 might_fault();
3148 +
3149 + if ((long)n < 0)
3150 + return n;
3151 +
3152 if (access_ok(VERIFY_WRITE, to, n))
3153 n = __copy_to_user(to, from, n);
3154 return n;
3155 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3156 static inline unsigned long __must_check
3157 __copy_from_user(void *to, const void __user *from, unsigned long n)
3158 {
3159 + if ((long)n < 0)
3160 + return n;
3161 +
3162 if (__builtin_constant_p(n) && (n <= 256))
3163 return uaccess.copy_from_user_small(n, from, to);
3164 else
3165 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
3166 unsigned int sz = __compiletime_object_size(to);
3167
3168 might_fault();
3169 +
3170 + if ((long)n < 0)
3171 + return n;
3172 +
3173 if (unlikely(sz != -1 && sz < n)) {
3174 copy_from_user_overflow();
3175 return n;
3176 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3177 index dfcb343..eda788a 100644
3178 --- a/arch/s390/kernel/module.c
3179 +++ b/arch/s390/kernel/module.c
3180 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3181
3182 /* Increase core size by size of got & plt and set start
3183 offsets for got and plt. */
3184 - me->core_size = ALIGN(me->core_size, 4);
3185 - me->arch.got_offset = me->core_size;
3186 - me->core_size += me->arch.got_size;
3187 - me->arch.plt_offset = me->core_size;
3188 - me->core_size += me->arch.plt_size;
3189 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3190 + me->arch.got_offset = me->core_size_rw;
3191 + me->core_size_rw += me->arch.got_size;
3192 + me->arch.plt_offset = me->core_size_rx;
3193 + me->core_size_rx += me->arch.plt_size;
3194 return 0;
3195 }
3196
3197 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3198 if (info->got_initialized == 0) {
3199 Elf_Addr *gotent;
3200
3201 - gotent = me->module_core + me->arch.got_offset +
3202 + gotent = me->module_core_rw + me->arch.got_offset +
3203 info->got_offset;
3204 *gotent = val;
3205 info->got_initialized = 1;
3206 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3207 else if (r_type == R_390_GOTENT ||
3208 r_type == R_390_GOTPLTENT)
3209 *(unsigned int *) loc =
3210 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3211 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3212 else if (r_type == R_390_GOT64 ||
3213 r_type == R_390_GOTPLT64)
3214 *(unsigned long *) loc = val;
3215 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3216 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3217 if (info->plt_initialized == 0) {
3218 unsigned int *ip;
3219 - ip = me->module_core + me->arch.plt_offset +
3220 + ip = me->module_core_rx + me->arch.plt_offset +
3221 info->plt_offset;
3222 #ifndef CONFIG_64BIT
3223 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3224 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3225 val - loc + 0xffffUL < 0x1ffffeUL) ||
3226 (r_type == R_390_PLT32DBL &&
3227 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3228 - val = (Elf_Addr) me->module_core +
3229 + val = (Elf_Addr) me->module_core_rx +
3230 me->arch.plt_offset +
3231 info->plt_offset;
3232 val += rela->r_addend - loc;
3233 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3234 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3235 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3236 val = val + rela->r_addend -
3237 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3238 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3239 if (r_type == R_390_GOTOFF16)
3240 *(unsigned short *) loc = val;
3241 else if (r_type == R_390_GOTOFF32)
3242 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3243 break;
3244 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3245 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3246 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3247 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3248 rela->r_addend - loc;
3249 if (r_type == R_390_GOTPC)
3250 *(unsigned int *) loc = val;
3251 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3252 index 9451b21..ed8956f 100644
3253 --- a/arch/s390/kernel/process.c
3254 +++ b/arch/s390/kernel/process.c
3255 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3256 }
3257 return 0;
3258 }
3259 -
3260 -unsigned long arch_align_stack(unsigned long sp)
3261 -{
3262 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3263 - sp -= get_random_int() & ~PAGE_MASK;
3264 - return sp & ~0xf;
3265 -}
3266 -
3267 -static inline unsigned long brk_rnd(void)
3268 -{
3269 - /* 8MB for 32bit, 1GB for 64bit */
3270 - if (is_32bit_task())
3271 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3272 - else
3273 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3274 -}
3275 -
3276 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3277 -{
3278 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3279 -
3280 - if (ret < mm->brk)
3281 - return mm->brk;
3282 - return ret;
3283 -}
3284 -
3285 -unsigned long randomize_et_dyn(unsigned long base)
3286 -{
3287 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3288 -
3289 - if (!(current->flags & PF_RANDOMIZE))
3290 - return base;
3291 - if (ret < base)
3292 - return base;
3293 - return ret;
3294 -}
3295 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3296 index f09c748..cf9ec1d 100644
3297 --- a/arch/s390/mm/mmap.c
3298 +++ b/arch/s390/mm/mmap.c
3299 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3300 */
3301 if (mmap_is_legacy()) {
3302 mm->mmap_base = TASK_UNMAPPED_BASE;
3303 +
3304 +#ifdef CONFIG_PAX_RANDMMAP
3305 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3306 + mm->mmap_base += mm->delta_mmap;
3307 +#endif
3308 +
3309 mm->get_unmapped_area = arch_get_unmapped_area;
3310 mm->unmap_area = arch_unmap_area;
3311 } else {
3312 mm->mmap_base = mmap_base();
3313 +
3314 +#ifdef CONFIG_PAX_RANDMMAP
3315 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3316 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3317 +#endif
3318 +
3319 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3320 mm->unmap_area = arch_unmap_area_topdown;
3321 }
3322 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3323 */
3324 if (mmap_is_legacy()) {
3325 mm->mmap_base = TASK_UNMAPPED_BASE;
3326 +
3327 +#ifdef CONFIG_PAX_RANDMMAP
3328 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3329 + mm->mmap_base += mm->delta_mmap;
3330 +#endif
3331 +
3332 mm->get_unmapped_area = s390_get_unmapped_area;
3333 mm->unmap_area = arch_unmap_area;
3334 } else {
3335 mm->mmap_base = mmap_base();
3336 +
3337 +#ifdef CONFIG_PAX_RANDMMAP
3338 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3339 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3340 +#endif
3341 +
3342 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3343 mm->unmap_area = arch_unmap_area_topdown;
3344 }
3345 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3346 index 589d5c7..669e274 100644
3347 --- a/arch/score/include/asm/system.h
3348 +++ b/arch/score/include/asm/system.h
3349 @@ -17,7 +17,7 @@ do { \
3350 #define finish_arch_switch(prev) do {} while (0)
3351
3352 typedef void (*vi_handler_t)(void);
3353 -extern unsigned long arch_align_stack(unsigned long sp);
3354 +#define arch_align_stack(x) (x)
3355
3356 #define mb() barrier()
3357 #define rmb() barrier()
3358 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3359 index 25d0803..d6c8e36 100644
3360 --- a/arch/score/kernel/process.c
3361 +++ b/arch/score/kernel/process.c
3362 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3363
3364 return task_pt_regs(task)->cp0_epc;
3365 }
3366 -
3367 -unsigned long arch_align_stack(unsigned long sp)
3368 -{
3369 - return sp;
3370 -}
3371 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3372 index afeb710..d1d1289 100644
3373 --- a/arch/sh/mm/mmap.c
3374 +++ b/arch/sh/mm/mmap.c
3375 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3376 addr = PAGE_ALIGN(addr);
3377
3378 vma = find_vma(mm, addr);
3379 - if (TASK_SIZE - len >= addr &&
3380 - (!vma || addr + len <= vma->vm_start))
3381 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3382 return addr;
3383 }
3384
3385 @@ -106,7 +105,7 @@ full_search:
3386 }
3387 return -ENOMEM;
3388 }
3389 - if (likely(!vma || addr + len <= vma->vm_start)) {
3390 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3391 /*
3392 * Remember the place where we stopped the search:
3393 */
3394 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3395 addr = PAGE_ALIGN(addr);
3396
3397 vma = find_vma(mm, addr);
3398 - if (TASK_SIZE - len >= addr &&
3399 - (!vma || addr + len <= vma->vm_start))
3400 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3401 return addr;
3402 }
3403
3404 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3405 /* make sure it can fit in the remaining address space */
3406 if (likely(addr > len)) {
3407 vma = find_vma(mm, addr-len);
3408 - if (!vma || addr <= vma->vm_start) {
3409 + if (check_heap_stack_gap(vma, addr - len, len)) {
3410 /* remember the address as a hint for next time */
3411 return (mm->free_area_cache = addr-len);
3412 }
3413 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3414 if (unlikely(mm->mmap_base < len))
3415 goto bottomup;
3416
3417 - addr = mm->mmap_base-len;
3418 - if (do_colour_align)
3419 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3420 + addr = mm->mmap_base - len;
3421
3422 do {
3423 + if (do_colour_align)
3424 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3425 /*
3426 * Lookup failure means no vma is above this address,
3427 * else if new region fits below vma->vm_start,
3428 * return with success:
3429 */
3430 vma = find_vma(mm, addr);
3431 - if (likely(!vma || addr+len <= vma->vm_start)) {
3432 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3433 /* remember the address as a hint for next time */
3434 return (mm->free_area_cache = addr);
3435 }
3436 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3437 mm->cached_hole_size = vma->vm_start - addr;
3438
3439 /* try just below the current vma->vm_start */
3440 - addr = vma->vm_start-len;
3441 - if (do_colour_align)
3442 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3443 - } while (likely(len < vma->vm_start));
3444 + addr = skip_heap_stack_gap(vma, len);
3445 + } while (!IS_ERR_VALUE(addr));
3446
3447 bottomup:
3448 /*
3449 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
3450 index f92602e..27060b2 100644
3451 --- a/arch/sparc/Kconfig
3452 +++ b/arch/sparc/Kconfig
3453 @@ -31,6 +31,7 @@ config SPARC
3454
3455 config SPARC32
3456 def_bool !64BIT
3457 + select GENERIC_ATOMIC64
3458
3459 config SPARC64
3460 def_bool 64BIT
3461 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3462 index ad1fb5d..fc5315b 100644
3463 --- a/arch/sparc/Makefile
3464 +++ b/arch/sparc/Makefile
3465 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3466 # Export what is needed by arch/sparc/boot/Makefile
3467 export VMLINUX_INIT VMLINUX_MAIN
3468 VMLINUX_INIT := $(head-y) $(init-y)
3469 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3470 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3471 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3472 VMLINUX_MAIN += $(drivers-y) $(net-y)
3473
3474 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
3475 index 8ff83d8..4a459c2 100644
3476 --- a/arch/sparc/include/asm/atomic.h
3477 +++ b/arch/sparc/include/asm/atomic.h
3478 @@ -4,5 +4,6 @@
3479 #include <asm/atomic_64.h>
3480 #else
3481 #include <asm/atomic_32.h>
3482 +#include <asm-generic/atomic64.h>
3483 #endif
3484 #endif
3485 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3486 index 9f421df..b81fc12 100644
3487 --- a/arch/sparc/include/asm/atomic_64.h
3488 +++ b/arch/sparc/include/asm/atomic_64.h
3489 @@ -14,18 +14,40 @@
3490 #define ATOMIC64_INIT(i) { (i) }
3491
3492 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3493 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3494 +{
3495 + return v->counter;
3496 +}
3497 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3498 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3499 +{
3500 + return v->counter;
3501 +}
3502
3503 #define atomic_set(v, i) (((v)->counter) = i)
3504 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3505 +{
3506 + v->counter = i;
3507 +}
3508 #define atomic64_set(v, i) (((v)->counter) = i)
3509 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3510 +{
3511 + v->counter = i;
3512 +}
3513
3514 extern void atomic_add(int, atomic_t *);
3515 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3516 extern void atomic64_add(long, atomic64_t *);
3517 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3518 extern void atomic_sub(int, atomic_t *);
3519 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3520 extern void atomic64_sub(long, atomic64_t *);
3521 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3522
3523 extern int atomic_add_ret(int, atomic_t *);
3524 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3525 extern long atomic64_add_ret(long, atomic64_t *);
3526 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3527 extern int atomic_sub_ret(int, atomic_t *);
3528 extern long atomic64_sub_ret(long, atomic64_t *);
3529
3530 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3531 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3532
3533 #define atomic_inc_return(v) atomic_add_ret(1, v)
3534 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3535 +{
3536 + return atomic_add_ret_unchecked(1, v);
3537 +}
3538 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3539 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3540 +{
3541 + return atomic64_add_ret_unchecked(1, v);
3542 +}
3543
3544 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3545 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3546
3547 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3548 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3549 +{
3550 + return atomic_add_ret_unchecked(i, v);
3551 +}
3552 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3553 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3554 +{
3555 + return atomic64_add_ret_unchecked(i, v);
3556 +}
3557
3558 /*
3559 * atomic_inc_and_test - increment and test
3560 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3561 * other cases.
3562 */
3563 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3564 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3565 +{
3566 + return atomic_inc_return_unchecked(v) == 0;
3567 +}
3568 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3569
3570 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3571 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3572 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3573
3574 #define atomic_inc(v) atomic_add(1, v)
3575 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3576 +{
3577 + atomic_add_unchecked(1, v);
3578 +}
3579 #define atomic64_inc(v) atomic64_add(1, v)
3580 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3581 +{
3582 + atomic64_add_unchecked(1, v);
3583 +}
3584
3585 #define atomic_dec(v) atomic_sub(1, v)
3586 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3587 +{
3588 + atomic_sub_unchecked(1, v);
3589 +}
3590 #define atomic64_dec(v) atomic64_sub(1, v)
3591 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3592 +{
3593 + atomic64_sub_unchecked(1, v);
3594 +}
3595
3596 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3597 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3598
3599 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3600 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3601 +{
3602 + return cmpxchg(&v->counter, old, new);
3603 +}
3604 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3605 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3606 +{
3607 + return xchg(&v->counter, new);
3608 +}
3609
3610 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3611 {
3612 - int c, old;
3613 + int c, old, new;
3614 c = atomic_read(v);
3615 for (;;) {
3616 - if (unlikely(c == (u)))
3617 + if (unlikely(c == u))
3618 break;
3619 - old = atomic_cmpxchg((v), c, c + (a));
3620 +
3621 + asm volatile("addcc %2, %0, %0\n"
3622 +
3623 +#ifdef CONFIG_PAX_REFCOUNT
3624 + "tvs %%icc, 6\n"
3625 +#endif
3626 +
3627 + : "=r" (new)
3628 + : "0" (c), "ir" (a)
3629 + : "cc");
3630 +
3631 + old = atomic_cmpxchg(v, c, new);
3632 if (likely(old == c))
3633 break;
3634 c = old;
3635 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3636 #define atomic64_cmpxchg(v, o, n) \
3637 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3638 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3639 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3640 +{
3641 + return xchg(&v->counter, new);
3642 +}
3643
3644 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3645 {
3646 - long c, old;
3647 + long c, old, new;
3648 c = atomic64_read(v);
3649 for (;;) {
3650 - if (unlikely(c == (u)))
3651 + if (unlikely(c == u))
3652 break;
3653 - old = atomic64_cmpxchg((v), c, c + (a));
3654 +
3655 + asm volatile("addcc %2, %0, %0\n"
3656 +
3657 +#ifdef CONFIG_PAX_REFCOUNT
3658 + "tvs %%xcc, 6\n"
3659 +#endif
3660 +
3661 + : "=r" (new)
3662 + : "0" (c), "ir" (a)
3663 + : "cc");
3664 +
3665 + old = atomic64_cmpxchg(v, c, new);
3666 if (likely(old == c))
3667 break;
3668 c = old;
3669 }
3670 - return c != (u);
3671 + return c != u;
3672 }
3673
3674 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3675 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3676 index 69358b5..17b4745 100644
3677 --- a/arch/sparc/include/asm/cache.h
3678 +++ b/arch/sparc/include/asm/cache.h
3679 @@ -10,7 +10,7 @@
3680 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3681
3682 #define L1_CACHE_SHIFT 5
3683 -#define L1_CACHE_BYTES 32
3684 +#define L1_CACHE_BYTES 32UL
3685
3686 #ifdef CONFIG_SPARC32
3687 #define SMP_CACHE_BYTES_SHIFT 5
3688 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3689 index 4269ca6..e3da77f 100644
3690 --- a/arch/sparc/include/asm/elf_32.h
3691 +++ b/arch/sparc/include/asm/elf_32.h
3692 @@ -114,6 +114,13 @@ typedef struct {
3693
3694 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3695
3696 +#ifdef CONFIG_PAX_ASLR
3697 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3698 +
3699 +#define PAX_DELTA_MMAP_LEN 16
3700 +#define PAX_DELTA_STACK_LEN 16
3701 +#endif
3702 +
3703 /* This yields a mask that user programs can use to figure out what
3704 instruction set this cpu supports. This can NOT be done in userspace
3705 on Sparc. */
3706 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3707 index 7df8b7f..4946269 100644
3708 --- a/arch/sparc/include/asm/elf_64.h
3709 +++ b/arch/sparc/include/asm/elf_64.h
3710 @@ -180,6 +180,13 @@ typedef struct {
3711 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3712 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3713
3714 +#ifdef CONFIG_PAX_ASLR
3715 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3716 +
3717 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3718 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3719 +#endif
3720 +
3721 extern unsigned long sparc64_elf_hwcap;
3722 #define ELF_HWCAP sparc64_elf_hwcap
3723
3724 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
3725 index 156707b..aefa786 100644
3726 --- a/arch/sparc/include/asm/page_32.h
3727 +++ b/arch/sparc/include/asm/page_32.h
3728 @@ -8,6 +8,8 @@
3729 #ifndef _SPARC_PAGE_H
3730 #define _SPARC_PAGE_H
3731
3732 +#include <linux/const.h>
3733 +
3734 #define PAGE_SHIFT 12
3735
3736 #ifndef __ASSEMBLY__
3737 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3738 index a790cc6..091ed94 100644
3739 --- a/arch/sparc/include/asm/pgtable_32.h
3740 +++ b/arch/sparc/include/asm/pgtable_32.h
3741 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3742 BTFIXUPDEF_INT(page_none)
3743 BTFIXUPDEF_INT(page_copy)
3744 BTFIXUPDEF_INT(page_readonly)
3745 +
3746 +#ifdef CONFIG_PAX_PAGEEXEC
3747 +BTFIXUPDEF_INT(page_shared_noexec)
3748 +BTFIXUPDEF_INT(page_copy_noexec)
3749 +BTFIXUPDEF_INT(page_readonly_noexec)
3750 +#endif
3751 +
3752 BTFIXUPDEF_INT(page_kernel)
3753
3754 #define PMD_SHIFT SUN4C_PMD_SHIFT
3755 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3756 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3757 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3758
3759 +#ifdef CONFIG_PAX_PAGEEXEC
3760 +extern pgprot_t PAGE_SHARED_NOEXEC;
3761 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3762 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3763 +#else
3764 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3765 +# define PAGE_COPY_NOEXEC PAGE_COPY
3766 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3767 +#endif
3768 +
3769 extern unsigned long page_kernel;
3770
3771 #ifdef MODULE
3772 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3773 index f6ae2b2..b03ffc7 100644
3774 --- a/arch/sparc/include/asm/pgtsrmmu.h
3775 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3776 @@ -115,6 +115,13 @@
3777 SRMMU_EXEC | SRMMU_REF)
3778 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3779 SRMMU_EXEC | SRMMU_REF)
3780 +
3781 +#ifdef CONFIG_PAX_PAGEEXEC
3782 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3783 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3784 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3785 +#endif
3786 +
3787 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3788 SRMMU_DIRTY | SRMMU_REF)
3789
3790 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3791 index 9689176..63c18ea 100644
3792 --- a/arch/sparc/include/asm/spinlock_64.h
3793 +++ b/arch/sparc/include/asm/spinlock_64.h
3794 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3795
3796 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3797
3798 -static void inline arch_read_lock(arch_rwlock_t *lock)
3799 +static inline void arch_read_lock(arch_rwlock_t *lock)
3800 {
3801 unsigned long tmp1, tmp2;
3802
3803 __asm__ __volatile__ (
3804 "1: ldsw [%2], %0\n"
3805 " brlz,pn %0, 2f\n"
3806 -"4: add %0, 1, %1\n"
3807 +"4: addcc %0, 1, %1\n"
3808 +
3809 +#ifdef CONFIG_PAX_REFCOUNT
3810 +" tvs %%icc, 6\n"
3811 +#endif
3812 +
3813 " cas [%2], %0, %1\n"
3814 " cmp %0, %1\n"
3815 " bne,pn %%icc, 1b\n"
3816 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3817 " .previous"
3818 : "=&r" (tmp1), "=&r" (tmp2)
3819 : "r" (lock)
3820 - : "memory");
3821 + : "memory", "cc");
3822 }
3823
3824 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3825 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3826 {
3827 int tmp1, tmp2;
3828
3829 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3830 "1: ldsw [%2], %0\n"
3831 " brlz,a,pn %0, 2f\n"
3832 " mov 0, %0\n"
3833 -" add %0, 1, %1\n"
3834 +" addcc %0, 1, %1\n"
3835 +
3836 +#ifdef CONFIG_PAX_REFCOUNT
3837 +" tvs %%icc, 6\n"
3838 +#endif
3839 +
3840 " cas [%2], %0, %1\n"
3841 " cmp %0, %1\n"
3842 " bne,pn %%icc, 1b\n"
3843 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3844 return tmp1;
3845 }
3846
3847 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3848 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3849 {
3850 unsigned long tmp1, tmp2;
3851
3852 __asm__ __volatile__(
3853 "1: lduw [%2], %0\n"
3854 -" sub %0, 1, %1\n"
3855 +" subcc %0, 1, %1\n"
3856 +
3857 +#ifdef CONFIG_PAX_REFCOUNT
3858 +" tvs %%icc, 6\n"
3859 +#endif
3860 +
3861 " cas [%2], %0, %1\n"
3862 " cmp %0, %1\n"
3863 " bne,pn %%xcc, 1b\n"
3864 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3865 : "memory");
3866 }
3867
3868 -static void inline arch_write_lock(arch_rwlock_t *lock)
3869 +static inline void arch_write_lock(arch_rwlock_t *lock)
3870 {
3871 unsigned long mask, tmp1, tmp2;
3872
3873 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3874 : "memory");
3875 }
3876
3877 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3878 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3879 {
3880 __asm__ __volatile__(
3881 " stw %%g0, [%0]"
3882 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3883 : "memory");
3884 }
3885
3886 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3887 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3888 {
3889 unsigned long mask, tmp1, tmp2, result;
3890
3891 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3892 index fa57532..e1a4c53 100644
3893 --- a/arch/sparc/include/asm/thread_info_32.h
3894 +++ b/arch/sparc/include/asm/thread_info_32.h
3895 @@ -50,6 +50,8 @@ struct thread_info {
3896 unsigned long w_saved;
3897
3898 struct restart_block restart_block;
3899 +
3900 + unsigned long lowest_stack;
3901 };
3902
3903 /*
3904 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3905 index 60d86be..952dea1 100644
3906 --- a/arch/sparc/include/asm/thread_info_64.h
3907 +++ b/arch/sparc/include/asm/thread_info_64.h
3908 @@ -63,6 +63,8 @@ struct thread_info {
3909 struct pt_regs *kern_una_regs;
3910 unsigned int kern_una_insn;
3911
3912 + unsigned long lowest_stack;
3913 +
3914 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3915 };
3916
3917 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3918 index e88fbe5..96b0ce5 100644
3919 --- a/arch/sparc/include/asm/uaccess.h
3920 +++ b/arch/sparc/include/asm/uaccess.h
3921 @@ -1,5 +1,13 @@
3922 #ifndef ___ASM_SPARC_UACCESS_H
3923 #define ___ASM_SPARC_UACCESS_H
3924 +
3925 +#ifdef __KERNEL__
3926 +#ifndef __ASSEMBLY__
3927 +#include <linux/types.h>
3928 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3929 +#endif
3930 +#endif
3931 +
3932 #if defined(__sparc__) && defined(__arch64__)
3933 #include <asm/uaccess_64.h>
3934 #else
3935 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3936 index 8303ac4..07f333d 100644
3937 --- a/arch/sparc/include/asm/uaccess_32.h
3938 +++ b/arch/sparc/include/asm/uaccess_32.h
3939 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3940
3941 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3942 {
3943 - if (n && __access_ok((unsigned long) to, n))
3944 + if ((long)n < 0)
3945 + return n;
3946 +
3947 + if (n && __access_ok((unsigned long) to, n)) {
3948 + if (!__builtin_constant_p(n))
3949 + check_object_size(from, n, true);
3950 return __copy_user(to, (__force void __user *) from, n);
3951 - else
3952 + } else
3953 return n;
3954 }
3955
3956 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3957 {
3958 + if ((long)n < 0)
3959 + return n;
3960 +
3961 + if (!__builtin_constant_p(n))
3962 + check_object_size(from, n, true);
3963 +
3964 return __copy_user(to, (__force void __user *) from, n);
3965 }
3966
3967 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3968 {
3969 - if (n && __access_ok((unsigned long) from, n))
3970 + if ((long)n < 0)
3971 + return n;
3972 +
3973 + if (n && __access_ok((unsigned long) from, n)) {
3974 + if (!__builtin_constant_p(n))
3975 + check_object_size(to, n, false);
3976 return __copy_user((__force void __user *) to, from, n);
3977 - else
3978 + } else
3979 return n;
3980 }
3981
3982 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3983 {
3984 + if ((long)n < 0)
3985 + return n;
3986 +
3987 return __copy_user((__force void __user *) to, from, n);
3988 }
3989
3990 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3991 index 3e1449f..5293a0e 100644
3992 --- a/arch/sparc/include/asm/uaccess_64.h
3993 +++ b/arch/sparc/include/asm/uaccess_64.h
3994 @@ -10,6 +10,7 @@
3995 #include <linux/compiler.h>
3996 #include <linux/string.h>
3997 #include <linux/thread_info.h>
3998 +#include <linux/kernel.h>
3999 #include <asm/asi.h>
4000 #include <asm/system.h>
4001 #include <asm/spitfire.h>
4002 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4003 static inline unsigned long __must_check
4004 copy_from_user(void *to, const void __user *from, unsigned long size)
4005 {
4006 - unsigned long ret = ___copy_from_user(to, from, size);
4007 + unsigned long ret;
4008
4009 + if ((long)size < 0 || size > INT_MAX)
4010 + return size;
4011 +
4012 + if (!__builtin_constant_p(size))
4013 + check_object_size(to, size, false);
4014 +
4015 + ret = ___copy_from_user(to, from, size);
4016 if (unlikely(ret))
4017 ret = copy_from_user_fixup(to, from, size);
4018
4019 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4020 static inline unsigned long __must_check
4021 copy_to_user(void __user *to, const void *from, unsigned long size)
4022 {
4023 - unsigned long ret = ___copy_to_user(to, from, size);
4024 + unsigned long ret;
4025
4026 + if ((long)size < 0 || size > INT_MAX)
4027 + return size;
4028 +
4029 + if (!__builtin_constant_p(size))
4030 + check_object_size(from, size, true);
4031 +
4032 + ret = ___copy_to_user(to, from, size);
4033 if (unlikely(ret))
4034 ret = copy_to_user_fixup(to, from, size);
4035 return ret;
4036 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4037 index cb85458..e063f17 100644
4038 --- a/arch/sparc/kernel/Makefile
4039 +++ b/arch/sparc/kernel/Makefile
4040 @@ -3,7 +3,7 @@
4041 #
4042
4043 asflags-y := -ansi
4044 -ccflags-y := -Werror
4045 +#ccflags-y := -Werror
4046
4047 extra-y := head_$(BITS).o
4048 extra-y += init_task.o
4049 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4050 index f793742..4d880af 100644
4051 --- a/arch/sparc/kernel/process_32.c
4052 +++ b/arch/sparc/kernel/process_32.c
4053 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
4054 rw->ins[4], rw->ins[5],
4055 rw->ins[6],
4056 rw->ins[7]);
4057 - printk("%pS\n", (void *) rw->ins[7]);
4058 + printk("%pA\n", (void *) rw->ins[7]);
4059 rw = (struct reg_window32 *) rw->ins[6];
4060 }
4061 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4062 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
4063
4064 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4065 r->psr, r->pc, r->npc, r->y, print_tainted());
4066 - printk("PC: <%pS>\n", (void *) r->pc);
4067 + printk("PC: <%pA>\n", (void *) r->pc);
4068 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4069 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4070 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4071 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4072 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4073 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4074 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4075 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4076
4077 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4078 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4079 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4080 rw = (struct reg_window32 *) fp;
4081 pc = rw->ins[7];
4082 printk("[%08lx : ", pc);
4083 - printk("%pS ] ", (void *) pc);
4084 + printk("%pA ] ", (void *) pc);
4085 fp = rw->ins[6];
4086 } while (++count < 16);
4087 printk("\n");
4088 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4089 index 3739a06..48b2ff0 100644
4090 --- a/arch/sparc/kernel/process_64.c
4091 +++ b/arch/sparc/kernel/process_64.c
4092 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4093 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4094 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4095 if (regs->tstate & TSTATE_PRIV)
4096 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4097 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4098 }
4099
4100 void show_regs(struct pt_regs *regs)
4101 {
4102 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4103 regs->tpc, regs->tnpc, regs->y, print_tainted());
4104 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4105 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4106 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4107 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4108 regs->u_regs[3]);
4109 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4110 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4111 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4112 regs->u_regs[15]);
4113 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4114 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4115 show_regwindow(regs);
4116 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
4117 }
4118 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
4119 ((tp && tp->task) ? tp->task->pid : -1));
4120
4121 if (gp->tstate & TSTATE_PRIV) {
4122 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4123 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4124 (void *) gp->tpc,
4125 (void *) gp->o7,
4126 (void *) gp->i7,
4127 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
4128 index 42b282f..28ce9f2 100644
4129 --- a/arch/sparc/kernel/sys_sparc_32.c
4130 +++ b/arch/sparc/kernel/sys_sparc_32.c
4131 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4132 if (ARCH_SUN4C && len > 0x20000000)
4133 return -ENOMEM;
4134 if (!addr)
4135 - addr = TASK_UNMAPPED_BASE;
4136 + addr = current->mm->mmap_base;
4137
4138 if (flags & MAP_SHARED)
4139 addr = COLOUR_ALIGN(addr);
4140 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4141 }
4142 if (TASK_SIZE - PAGE_SIZE - len < addr)
4143 return -ENOMEM;
4144 - if (!vmm || addr + len <= vmm->vm_start)
4145 + if (check_heap_stack_gap(vmm, addr, len))
4146 return addr;
4147 addr = vmm->vm_end;
4148 if (flags & MAP_SHARED)
4149 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
4150 index 441521a..b767073 100644
4151 --- a/arch/sparc/kernel/sys_sparc_64.c
4152 +++ b/arch/sparc/kernel/sys_sparc_64.c
4153 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4154 /* We do not accept a shared mapping if it would violate
4155 * cache aliasing constraints.
4156 */
4157 - if ((flags & MAP_SHARED) &&
4158 + if ((filp || (flags & MAP_SHARED)) &&
4159 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4160 return -EINVAL;
4161 return addr;
4162 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4163 if (filp || (flags & MAP_SHARED))
4164 do_color_align = 1;
4165
4166 +#ifdef CONFIG_PAX_RANDMMAP
4167 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4168 +#endif
4169 +
4170 if (addr) {
4171 if (do_color_align)
4172 addr = COLOUR_ALIGN(addr, pgoff);
4173 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4174 addr = PAGE_ALIGN(addr);
4175
4176 vma = find_vma(mm, addr);
4177 - if (task_size - len >= addr &&
4178 - (!vma || addr + len <= vma->vm_start))
4179 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4180 return addr;
4181 }
4182
4183 if (len > mm->cached_hole_size) {
4184 - start_addr = addr = mm->free_area_cache;
4185 + start_addr = addr = mm->free_area_cache;
4186 } else {
4187 - start_addr = addr = TASK_UNMAPPED_BASE;
4188 + start_addr = addr = mm->mmap_base;
4189 mm->cached_hole_size = 0;
4190 }
4191
4192 @@ -174,14 +177,14 @@ full_search:
4193 vma = find_vma(mm, VA_EXCLUDE_END);
4194 }
4195 if (unlikely(task_size < addr)) {
4196 - if (start_addr != TASK_UNMAPPED_BASE) {
4197 - start_addr = addr = TASK_UNMAPPED_BASE;
4198 + if (start_addr != mm->mmap_base) {
4199 + start_addr = addr = mm->mmap_base;
4200 mm->cached_hole_size = 0;
4201 goto full_search;
4202 }
4203 return -ENOMEM;
4204 }
4205 - if (likely(!vma || addr + len <= vma->vm_start)) {
4206 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4207 /*
4208 * Remember the place where we stopped the search:
4209 */
4210 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4211 /* We do not accept a shared mapping if it would violate
4212 * cache aliasing constraints.
4213 */
4214 - if ((flags & MAP_SHARED) &&
4215 + if ((filp || (flags & MAP_SHARED)) &&
4216 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4217 return -EINVAL;
4218 return addr;
4219 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4220 addr = PAGE_ALIGN(addr);
4221
4222 vma = find_vma(mm, addr);
4223 - if (task_size - len >= addr &&
4224 - (!vma || addr + len <= vma->vm_start))
4225 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4226 return addr;
4227 }
4228
4229 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4230 /* make sure it can fit in the remaining address space */
4231 if (likely(addr > len)) {
4232 vma = find_vma(mm, addr-len);
4233 - if (!vma || addr <= vma->vm_start) {
4234 + if (check_heap_stack_gap(vma, addr - len, len)) {
4235 /* remember the address as a hint for next time */
4236 return (mm->free_area_cache = addr-len);
4237 }
4238 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4239 if (unlikely(mm->mmap_base < len))
4240 goto bottomup;
4241
4242 - addr = mm->mmap_base-len;
4243 - if (do_color_align)
4244 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4245 + addr = mm->mmap_base - len;
4246
4247 do {
4248 + if (do_color_align)
4249 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4250 /*
4251 * Lookup failure means no vma is above this address,
4252 * else if new region fits below vma->vm_start,
4253 * return with success:
4254 */
4255 vma = find_vma(mm, addr);
4256 - if (likely(!vma || addr+len <= vma->vm_start)) {
4257 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4258 /* remember the address as a hint for next time */
4259 return (mm->free_area_cache = addr);
4260 }
4261 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4262 mm->cached_hole_size = vma->vm_start - addr;
4263
4264 /* try just below the current vma->vm_start */
4265 - addr = vma->vm_start-len;
4266 - if (do_color_align)
4267 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4268 - } while (likely(len < vma->vm_start));
4269 + addr = skip_heap_stack_gap(vma, len);
4270 + } while (!IS_ERR_VALUE(addr));
4271
4272 bottomup:
4273 /*
4274 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4275 gap == RLIM_INFINITY ||
4276 sysctl_legacy_va_layout) {
4277 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4278 +
4279 +#ifdef CONFIG_PAX_RANDMMAP
4280 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4281 + mm->mmap_base += mm->delta_mmap;
4282 +#endif
4283 +
4284 mm->get_unmapped_area = arch_get_unmapped_area;
4285 mm->unmap_area = arch_unmap_area;
4286 } else {
4287 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4288 gap = (task_size / 6 * 5);
4289
4290 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4291 +
4292 +#ifdef CONFIG_PAX_RANDMMAP
4293 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4294 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4295 +#endif
4296 +
4297 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4298 mm->unmap_area = arch_unmap_area_topdown;
4299 }
4300 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4301 index 591f20c..0f1b925 100644
4302 --- a/arch/sparc/kernel/traps_32.c
4303 +++ b/arch/sparc/kernel/traps_32.c
4304 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4305 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4306 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4307
4308 +extern void gr_handle_kernel_exploit(void);
4309 +
4310 void die_if_kernel(char *str, struct pt_regs *regs)
4311 {
4312 static int die_counter;
4313 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4314 count++ < 30 &&
4315 (((unsigned long) rw) >= PAGE_OFFSET) &&
4316 !(((unsigned long) rw) & 0x7)) {
4317 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4318 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4319 (void *) rw->ins[7]);
4320 rw = (struct reg_window32 *)rw->ins[6];
4321 }
4322 }
4323 printk("Instruction DUMP:");
4324 instruction_dump ((unsigned long *) regs->pc);
4325 - if(regs->psr & PSR_PS)
4326 + if(regs->psr & PSR_PS) {
4327 + gr_handle_kernel_exploit();
4328 do_exit(SIGKILL);
4329 + }
4330 do_exit(SIGSEGV);
4331 }
4332
4333 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4334 index 0cbdaa4..438e4c9 100644
4335 --- a/arch/sparc/kernel/traps_64.c
4336 +++ b/arch/sparc/kernel/traps_64.c
4337 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4338 i + 1,
4339 p->trapstack[i].tstate, p->trapstack[i].tpc,
4340 p->trapstack[i].tnpc, p->trapstack[i].tt);
4341 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4342 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4343 }
4344 }
4345
4346 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4347
4348 lvl -= 0x100;
4349 if (regs->tstate & TSTATE_PRIV) {
4350 +
4351 +#ifdef CONFIG_PAX_REFCOUNT
4352 + if (lvl == 6)
4353 + pax_report_refcount_overflow(regs);
4354 +#endif
4355 +
4356 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4357 die_if_kernel(buffer, regs);
4358 }
4359 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4360 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4361 {
4362 char buffer[32];
4363 -
4364 +
4365 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4366 0, lvl, SIGTRAP) == NOTIFY_STOP)
4367 return;
4368
4369 +#ifdef CONFIG_PAX_REFCOUNT
4370 + if (lvl == 6)
4371 + pax_report_refcount_overflow(regs);
4372 +#endif
4373 +
4374 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4375
4376 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4377 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4378 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4379 printk("%s" "ERROR(%d): ",
4380 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4381 - printk("TPC<%pS>\n", (void *) regs->tpc);
4382 + printk("TPC<%pA>\n", (void *) regs->tpc);
4383 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4384 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4385 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4386 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4387 smp_processor_id(),
4388 (type & 0x1) ? 'I' : 'D',
4389 regs->tpc);
4390 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4391 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4392 panic("Irrecoverable Cheetah+ parity error.");
4393 }
4394
4395 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4396 smp_processor_id(),
4397 (type & 0x1) ? 'I' : 'D',
4398 regs->tpc);
4399 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4400 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4401 }
4402
4403 struct sun4v_error_entry {
4404 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4405
4406 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4407 regs->tpc, tl);
4408 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4409 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4410 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4411 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4412 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4413 (void *) regs->u_regs[UREG_I7]);
4414 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4415 "pte[%lx] error[%lx]\n",
4416 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4417
4418 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4419 regs->tpc, tl);
4420 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4421 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4422 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4423 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4424 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4425 (void *) regs->u_regs[UREG_I7]);
4426 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4427 "pte[%lx] error[%lx]\n",
4428 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4429 fp = (unsigned long)sf->fp + STACK_BIAS;
4430 }
4431
4432 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4433 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4434 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4435 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4436 int index = tsk->curr_ret_stack;
4437 if (tsk->ret_stack && index >= graph) {
4438 pc = tsk->ret_stack[index - graph].ret;
4439 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4440 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4441 graph++;
4442 }
4443 }
4444 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4445 return (struct reg_window *) (fp + STACK_BIAS);
4446 }
4447
4448 +extern void gr_handle_kernel_exploit(void);
4449 +
4450 void die_if_kernel(char *str, struct pt_regs *regs)
4451 {
4452 static int die_counter;
4453 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4454 while (rw &&
4455 count++ < 30 &&
4456 kstack_valid(tp, (unsigned long) rw)) {
4457 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4458 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4459 (void *) rw->ins[7]);
4460
4461 rw = kernel_stack_up(rw);
4462 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4463 }
4464 user_instruction_dump ((unsigned int __user *) regs->tpc);
4465 }
4466 - if (regs->tstate & TSTATE_PRIV)
4467 + if (regs->tstate & TSTATE_PRIV) {
4468 + gr_handle_kernel_exploit();
4469 do_exit(SIGKILL);
4470 + }
4471 do_exit(SIGSEGV);
4472 }
4473 EXPORT_SYMBOL(die_if_kernel);
4474 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4475 index 76e4ac1..78f8bb1 100644
4476 --- a/arch/sparc/kernel/unaligned_64.c
4477 +++ b/arch/sparc/kernel/unaligned_64.c
4478 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4479 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4480
4481 if (__ratelimit(&ratelimit)) {
4482 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4483 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4484 regs->tpc, (void *) regs->tpc);
4485 }
4486 }
4487 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4488 index a3fc437..fea9957 100644
4489 --- a/arch/sparc/lib/Makefile
4490 +++ b/arch/sparc/lib/Makefile
4491 @@ -2,7 +2,7 @@
4492 #
4493
4494 asflags-y := -ansi -DST_DIV0=0x02
4495 -ccflags-y := -Werror
4496 +#ccflags-y := -Werror
4497
4498 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4499 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4500 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4501 index 59186e0..f747d7a 100644
4502 --- a/arch/sparc/lib/atomic_64.S
4503 +++ b/arch/sparc/lib/atomic_64.S
4504 @@ -18,7 +18,12 @@
4505 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4506 BACKOFF_SETUP(%o2)
4507 1: lduw [%o1], %g1
4508 - add %g1, %o0, %g7
4509 + addcc %g1, %o0, %g7
4510 +
4511 +#ifdef CONFIG_PAX_REFCOUNT
4512 + tvs %icc, 6
4513 +#endif
4514 +
4515 cas [%o1], %g1, %g7
4516 cmp %g1, %g7
4517 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4518 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4519 2: BACKOFF_SPIN(%o2, %o3, 1b)
4520 .size atomic_add, .-atomic_add
4521
4522 + .globl atomic_add_unchecked
4523 + .type atomic_add_unchecked,#function
4524 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4525 + BACKOFF_SETUP(%o2)
4526 +1: lduw [%o1], %g1
4527 + add %g1, %o0, %g7
4528 + cas [%o1], %g1, %g7
4529 + cmp %g1, %g7
4530 + bne,pn %icc, 2f
4531 + nop
4532 + retl
4533 + nop
4534 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4535 + .size atomic_add_unchecked, .-atomic_add_unchecked
4536 +
4537 .globl atomic_sub
4538 .type atomic_sub,#function
4539 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4540 BACKOFF_SETUP(%o2)
4541 1: lduw [%o1], %g1
4542 - sub %g1, %o0, %g7
4543 + subcc %g1, %o0, %g7
4544 +
4545 +#ifdef CONFIG_PAX_REFCOUNT
4546 + tvs %icc, 6
4547 +#endif
4548 +
4549 cas [%o1], %g1, %g7
4550 cmp %g1, %g7
4551 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4552 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4553 2: BACKOFF_SPIN(%o2, %o3, 1b)
4554 .size atomic_sub, .-atomic_sub
4555
4556 + .globl atomic_sub_unchecked
4557 + .type atomic_sub_unchecked,#function
4558 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4559 + BACKOFF_SETUP(%o2)
4560 +1: lduw [%o1], %g1
4561 + sub %g1, %o0, %g7
4562 + cas [%o1], %g1, %g7
4563 + cmp %g1, %g7
4564 + bne,pn %icc, 2f
4565 + nop
4566 + retl
4567 + nop
4568 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4569 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4570 +
4571 .globl atomic_add_ret
4572 .type atomic_add_ret,#function
4573 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4574 BACKOFF_SETUP(%o2)
4575 1: lduw [%o1], %g1
4576 - add %g1, %o0, %g7
4577 + addcc %g1, %o0, %g7
4578 +
4579 +#ifdef CONFIG_PAX_REFCOUNT
4580 + tvs %icc, 6
4581 +#endif
4582 +
4583 cas [%o1], %g1, %g7
4584 cmp %g1, %g7
4585 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4586 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4587 2: BACKOFF_SPIN(%o2, %o3, 1b)
4588 .size atomic_add_ret, .-atomic_add_ret
4589
4590 + .globl atomic_add_ret_unchecked
4591 + .type atomic_add_ret_unchecked,#function
4592 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4593 + BACKOFF_SETUP(%o2)
4594 +1: lduw [%o1], %g1
4595 + addcc %g1, %o0, %g7
4596 + cas [%o1], %g1, %g7
4597 + cmp %g1, %g7
4598 + bne,pn %icc, 2f
4599 + add %g7, %o0, %g7
4600 + sra %g7, 0, %o0
4601 + retl
4602 + nop
4603 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4604 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4605 +
4606 .globl atomic_sub_ret
4607 .type atomic_sub_ret,#function
4608 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4609 BACKOFF_SETUP(%o2)
4610 1: lduw [%o1], %g1
4611 - sub %g1, %o0, %g7
4612 + subcc %g1, %o0, %g7
4613 +
4614 +#ifdef CONFIG_PAX_REFCOUNT
4615 + tvs %icc, 6
4616 +#endif
4617 +
4618 cas [%o1], %g1, %g7
4619 cmp %g1, %g7
4620 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4621 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4622 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4623 BACKOFF_SETUP(%o2)
4624 1: ldx [%o1], %g1
4625 - add %g1, %o0, %g7
4626 + addcc %g1, %o0, %g7
4627 +
4628 +#ifdef CONFIG_PAX_REFCOUNT
4629 + tvs %xcc, 6
4630 +#endif
4631 +
4632 casx [%o1], %g1, %g7
4633 cmp %g1, %g7
4634 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4635 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4636 2: BACKOFF_SPIN(%o2, %o3, 1b)
4637 .size atomic64_add, .-atomic64_add
4638
4639 + .globl atomic64_add_unchecked
4640 + .type atomic64_add_unchecked,#function
4641 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4642 + BACKOFF_SETUP(%o2)
4643 +1: ldx [%o1], %g1
4644 + addcc %g1, %o0, %g7
4645 + casx [%o1], %g1, %g7
4646 + cmp %g1, %g7
4647 + bne,pn %xcc, 2f
4648 + nop
4649 + retl
4650 + nop
4651 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4652 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4653 +
4654 .globl atomic64_sub
4655 .type atomic64_sub,#function
4656 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4657 BACKOFF_SETUP(%o2)
4658 1: ldx [%o1], %g1
4659 - sub %g1, %o0, %g7
4660 + subcc %g1, %o0, %g7
4661 +
4662 +#ifdef CONFIG_PAX_REFCOUNT
4663 + tvs %xcc, 6
4664 +#endif
4665 +
4666 casx [%o1], %g1, %g7
4667 cmp %g1, %g7
4668 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4669 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4670 2: BACKOFF_SPIN(%o2, %o3, 1b)
4671 .size atomic64_sub, .-atomic64_sub
4672
4673 + .globl atomic64_sub_unchecked
4674 + .type atomic64_sub_unchecked,#function
4675 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4676 + BACKOFF_SETUP(%o2)
4677 +1: ldx [%o1], %g1
4678 + subcc %g1, %o0, %g7
4679 + casx [%o1], %g1, %g7
4680 + cmp %g1, %g7
4681 + bne,pn %xcc, 2f
4682 + nop
4683 + retl
4684 + nop
4685 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4686 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4687 +
4688 .globl atomic64_add_ret
4689 .type atomic64_add_ret,#function
4690 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4691 BACKOFF_SETUP(%o2)
4692 1: ldx [%o1], %g1
4693 - add %g1, %o0, %g7
4694 + addcc %g1, %o0, %g7
4695 +
4696 +#ifdef CONFIG_PAX_REFCOUNT
4697 + tvs %xcc, 6
4698 +#endif
4699 +
4700 casx [%o1], %g1, %g7
4701 cmp %g1, %g7
4702 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4703 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4704 2: BACKOFF_SPIN(%o2, %o3, 1b)
4705 .size atomic64_add_ret, .-atomic64_add_ret
4706
4707 + .globl atomic64_add_ret_unchecked
4708 + .type atomic64_add_ret_unchecked,#function
4709 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4710 + BACKOFF_SETUP(%o2)
4711 +1: ldx [%o1], %g1
4712 + addcc %g1, %o0, %g7
4713 + casx [%o1], %g1, %g7
4714 + cmp %g1, %g7
4715 + bne,pn %xcc, 2f
4716 + add %g7, %o0, %g7
4717 + mov %g7, %o0
4718 + retl
4719 + nop
4720 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4721 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4722 +
4723 .globl atomic64_sub_ret
4724 .type atomic64_sub_ret,#function
4725 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4726 BACKOFF_SETUP(%o2)
4727 1: ldx [%o1], %g1
4728 - sub %g1, %o0, %g7
4729 + subcc %g1, %o0, %g7
4730 +
4731 +#ifdef CONFIG_PAX_REFCOUNT
4732 + tvs %xcc, 6
4733 +#endif
4734 +
4735 casx [%o1], %g1, %g7
4736 cmp %g1, %g7
4737 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4738 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4739 index 1b30bb3..b4a16c7 100644
4740 --- a/arch/sparc/lib/ksyms.c
4741 +++ b/arch/sparc/lib/ksyms.c
4742 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4743
4744 /* Atomic counter implementation. */
4745 EXPORT_SYMBOL(atomic_add);
4746 +EXPORT_SYMBOL(atomic_add_unchecked);
4747 EXPORT_SYMBOL(atomic_add_ret);
4748 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4749 EXPORT_SYMBOL(atomic_sub);
4750 +EXPORT_SYMBOL(atomic_sub_unchecked);
4751 EXPORT_SYMBOL(atomic_sub_ret);
4752 EXPORT_SYMBOL(atomic64_add);
4753 +EXPORT_SYMBOL(atomic64_add_unchecked);
4754 EXPORT_SYMBOL(atomic64_add_ret);
4755 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4756 EXPORT_SYMBOL(atomic64_sub);
4757 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4758 EXPORT_SYMBOL(atomic64_sub_ret);
4759
4760 /* Atomic bit operations. */
4761 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4762 index 301421c..e2535d1 100644
4763 --- a/arch/sparc/mm/Makefile
4764 +++ b/arch/sparc/mm/Makefile
4765 @@ -2,7 +2,7 @@
4766 #
4767
4768 asflags-y := -ansi
4769 -ccflags-y := -Werror
4770 +#ccflags-y := -Werror
4771
4772 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4773 obj-y += fault_$(BITS).o
4774 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4775 index 8023fd7..c8e89e9 100644
4776 --- a/arch/sparc/mm/fault_32.c
4777 +++ b/arch/sparc/mm/fault_32.c
4778 @@ -21,6 +21,9 @@
4779 #include <linux/perf_event.h>
4780 #include <linux/interrupt.h>
4781 #include <linux/kdebug.h>
4782 +#include <linux/slab.h>
4783 +#include <linux/pagemap.h>
4784 +#include <linux/compiler.h>
4785
4786 #include <asm/system.h>
4787 #include <asm/page.h>
4788 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4789 return safe_compute_effective_address(regs, insn);
4790 }
4791
4792 +#ifdef CONFIG_PAX_PAGEEXEC
4793 +#ifdef CONFIG_PAX_DLRESOLVE
4794 +static void pax_emuplt_close(struct vm_area_struct *vma)
4795 +{
4796 + vma->vm_mm->call_dl_resolve = 0UL;
4797 +}
4798 +
4799 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4800 +{
4801 + unsigned int *kaddr;
4802 +
4803 + vmf->page = alloc_page(GFP_HIGHUSER);
4804 + if (!vmf->page)
4805 + return VM_FAULT_OOM;
4806 +
4807 + kaddr = kmap(vmf->page);
4808 + memset(kaddr, 0, PAGE_SIZE);
4809 + kaddr[0] = 0x9DE3BFA8U; /* save */
4810 + flush_dcache_page(vmf->page);
4811 + kunmap(vmf->page);
4812 + return VM_FAULT_MAJOR;
4813 +}
4814 +
4815 +static const struct vm_operations_struct pax_vm_ops = {
4816 + .close = pax_emuplt_close,
4817 + .fault = pax_emuplt_fault
4818 +};
4819 +
4820 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4821 +{
4822 + int ret;
4823 +
4824 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4825 + vma->vm_mm = current->mm;
4826 + vma->vm_start = addr;
4827 + vma->vm_end = addr + PAGE_SIZE;
4828 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4829 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4830 + vma->vm_ops = &pax_vm_ops;
4831 +
4832 + ret = insert_vm_struct(current->mm, vma);
4833 + if (ret)
4834 + return ret;
4835 +
4836 + ++current->mm->total_vm;
4837 + return 0;
4838 +}
4839 +#endif
4840 +
4841 +/*
4842 + * PaX: decide what to do with offenders (regs->pc = fault address)
4843 + *
4844 + * returns 1 when task should be killed
4845 + * 2 when patched PLT trampoline was detected
4846 + * 3 when unpatched PLT trampoline was detected
4847 + */
4848 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4849 +{
4850 +
4851 +#ifdef CONFIG_PAX_EMUPLT
4852 + int err;
4853 +
4854 + do { /* PaX: patched PLT emulation #1 */
4855 + unsigned int sethi1, sethi2, jmpl;
4856 +
4857 + err = get_user(sethi1, (unsigned int *)regs->pc);
4858 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4859 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4860 +
4861 + if (err)
4862 + break;
4863 +
4864 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4865 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4866 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4867 + {
4868 + unsigned int addr;
4869 +
4870 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4871 + addr = regs->u_regs[UREG_G1];
4872 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4873 + regs->pc = addr;
4874 + regs->npc = addr+4;
4875 + return 2;
4876 + }
4877 + } while (0);
4878 +
4879 + { /* PaX: patched PLT emulation #2 */
4880 + unsigned int ba;
4881 +
4882 + err = get_user(ba, (unsigned int *)regs->pc);
4883 +
4884 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4885 + unsigned int addr;
4886 +
4887 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4888 + regs->pc = addr;
4889 + regs->npc = addr+4;
4890 + return 2;
4891 + }
4892 + }
4893 +
4894 + do { /* PaX: patched PLT emulation #3 */
4895 + unsigned int sethi, jmpl, nop;
4896 +
4897 + err = get_user(sethi, (unsigned int *)regs->pc);
4898 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4899 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4900 +
4901 + if (err)
4902 + break;
4903 +
4904 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4905 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4906 + nop == 0x01000000U)
4907 + {
4908 + unsigned int addr;
4909 +
4910 + addr = (sethi & 0x003FFFFFU) << 10;
4911 + regs->u_regs[UREG_G1] = addr;
4912 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4913 + regs->pc = addr;
4914 + regs->npc = addr+4;
4915 + return 2;
4916 + }
4917 + } while (0);
4918 +
4919 + do { /* PaX: unpatched PLT emulation step 1 */
4920 + unsigned int sethi, ba, nop;
4921 +
4922 + err = get_user(sethi, (unsigned int *)regs->pc);
4923 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4924 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4925 +
4926 + if (err)
4927 + break;
4928 +
4929 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4930 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4931 + nop == 0x01000000U)
4932 + {
4933 + unsigned int addr, save, call;
4934 +
4935 + if ((ba & 0xFFC00000U) == 0x30800000U)
4936 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4937 + else
4938 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4939 +
4940 + err = get_user(save, (unsigned int *)addr);
4941 + err |= get_user(call, (unsigned int *)(addr+4));
4942 + err |= get_user(nop, (unsigned int *)(addr+8));
4943 + if (err)
4944 + break;
4945 +
4946 +#ifdef CONFIG_PAX_DLRESOLVE
4947 + if (save == 0x9DE3BFA8U &&
4948 + (call & 0xC0000000U) == 0x40000000U &&
4949 + nop == 0x01000000U)
4950 + {
4951 + struct vm_area_struct *vma;
4952 + unsigned long call_dl_resolve;
4953 +
4954 + down_read(&current->mm->mmap_sem);
4955 + call_dl_resolve = current->mm->call_dl_resolve;
4956 + up_read(&current->mm->mmap_sem);
4957 + if (likely(call_dl_resolve))
4958 + goto emulate;
4959 +
4960 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4961 +
4962 + down_write(&current->mm->mmap_sem);
4963 + if (current->mm->call_dl_resolve) {
4964 + call_dl_resolve = current->mm->call_dl_resolve;
4965 + up_write(&current->mm->mmap_sem);
4966 + if (vma)
4967 + kmem_cache_free(vm_area_cachep, vma);
4968 + goto emulate;
4969 + }
4970 +
4971 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4972 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4973 + up_write(&current->mm->mmap_sem);
4974 + if (vma)
4975 + kmem_cache_free(vm_area_cachep, vma);
4976 + return 1;
4977 + }
4978 +
4979 + if (pax_insert_vma(vma, call_dl_resolve)) {
4980 + up_write(&current->mm->mmap_sem);
4981 + kmem_cache_free(vm_area_cachep, vma);
4982 + return 1;
4983 + }
4984 +
4985 + current->mm->call_dl_resolve = call_dl_resolve;
4986 + up_write(&current->mm->mmap_sem);
4987 +
4988 +emulate:
4989 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4990 + regs->pc = call_dl_resolve;
4991 + regs->npc = addr+4;
4992 + return 3;
4993 + }
4994 +#endif
4995 +
4996 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4997 + if ((save & 0xFFC00000U) == 0x05000000U &&
4998 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4999 + nop == 0x01000000U)
5000 + {
5001 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5002 + regs->u_regs[UREG_G2] = addr + 4;
5003 + addr = (save & 0x003FFFFFU) << 10;
5004 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5005 + regs->pc = addr;
5006 + regs->npc = addr+4;
5007 + return 3;
5008 + }
5009 + }
5010 + } while (0);
5011 +
5012 + do { /* PaX: unpatched PLT emulation step 2 */
5013 + unsigned int save, call, nop;
5014 +
5015 + err = get_user(save, (unsigned int *)(regs->pc-4));
5016 + err |= get_user(call, (unsigned int *)regs->pc);
5017 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5018 + if (err)
5019 + break;
5020 +
5021 + if (save == 0x9DE3BFA8U &&
5022 + (call & 0xC0000000U) == 0x40000000U &&
5023 + nop == 0x01000000U)
5024 + {
5025 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5026 +
5027 + regs->u_regs[UREG_RETPC] = regs->pc;
5028 + regs->pc = dl_resolve;
5029 + regs->npc = dl_resolve+4;
5030 + return 3;
5031 + }
5032 + } while (0);
5033 +#endif
5034 +
5035 + return 1;
5036 +}
5037 +
5038 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5039 +{
5040 + unsigned long i;
5041 +
5042 + printk(KERN_ERR "PAX: bytes at PC: ");
5043 + for (i = 0; i < 8; i++) {
5044 + unsigned int c;
5045 + if (get_user(c, (unsigned int *)pc+i))
5046 + printk(KERN_CONT "???????? ");
5047 + else
5048 + printk(KERN_CONT "%08x ", c);
5049 + }
5050 + printk("\n");
5051 +}
5052 +#endif
5053 +
5054 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
5055 int text_fault)
5056 {
5057 @@ -280,6 +545,24 @@ good_area:
5058 if(!(vma->vm_flags & VM_WRITE))
5059 goto bad_area;
5060 } else {
5061 +
5062 +#ifdef CONFIG_PAX_PAGEEXEC
5063 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5064 + up_read(&mm->mmap_sem);
5065 + switch (pax_handle_fetch_fault(regs)) {
5066 +
5067 +#ifdef CONFIG_PAX_EMUPLT
5068 + case 2:
5069 + case 3:
5070 + return;
5071 +#endif
5072 +
5073 + }
5074 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5075 + do_group_exit(SIGKILL);
5076 + }
5077 +#endif
5078 +
5079 /* Allow reads even for write-only mappings */
5080 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5081 goto bad_area;
5082 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
5083 index 504c062..6fcb9c6 100644
5084 --- a/arch/sparc/mm/fault_64.c
5085 +++ b/arch/sparc/mm/fault_64.c
5086 @@ -21,6 +21,9 @@
5087 #include <linux/kprobes.h>
5088 #include <linux/kdebug.h>
5089 #include <linux/percpu.h>
5090 +#include <linux/slab.h>
5091 +#include <linux/pagemap.h>
5092 +#include <linux/compiler.h>
5093
5094 #include <asm/page.h>
5095 #include <asm/pgtable.h>
5096 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
5097 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5098 regs->tpc);
5099 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5100 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5101 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5102 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5103 dump_stack();
5104 unhandled_fault(regs->tpc, current, regs);
5105 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
5106 show_regs(regs);
5107 }
5108
5109 +#ifdef CONFIG_PAX_PAGEEXEC
5110 +#ifdef CONFIG_PAX_DLRESOLVE
5111 +static void pax_emuplt_close(struct vm_area_struct *vma)
5112 +{
5113 + vma->vm_mm->call_dl_resolve = 0UL;
5114 +}
5115 +
5116 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5117 +{
5118 + unsigned int *kaddr;
5119 +
5120 + vmf->page = alloc_page(GFP_HIGHUSER);
5121 + if (!vmf->page)
5122 + return VM_FAULT_OOM;
5123 +
5124 + kaddr = kmap(vmf->page);
5125 + memset(kaddr, 0, PAGE_SIZE);
5126 + kaddr[0] = 0x9DE3BFA8U; /* save */
5127 + flush_dcache_page(vmf->page);
5128 + kunmap(vmf->page);
5129 + return VM_FAULT_MAJOR;
5130 +}
5131 +
5132 +static const struct vm_operations_struct pax_vm_ops = {
5133 + .close = pax_emuplt_close,
5134 + .fault = pax_emuplt_fault
5135 +};
5136 +
5137 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5138 +{
5139 + int ret;
5140 +
5141 + INIT_LIST_HEAD(&vma->anon_vma_chain);
5142 + vma->vm_mm = current->mm;
5143 + vma->vm_start = addr;
5144 + vma->vm_end = addr + PAGE_SIZE;
5145 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5146 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5147 + vma->vm_ops = &pax_vm_ops;
5148 +
5149 + ret = insert_vm_struct(current->mm, vma);
5150 + if (ret)
5151 + return ret;
5152 +
5153 + ++current->mm->total_vm;
5154 + return 0;
5155 +}
5156 +#endif
5157 +
5158 +/*
5159 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5160 + *
5161 + * returns 1 when task should be killed
5162 + * 2 when patched PLT trampoline was detected
5163 + * 3 when unpatched PLT trampoline was detected
5164 + */
5165 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5166 +{
5167 +
5168 +#ifdef CONFIG_PAX_EMUPLT
5169 + int err;
5170 +
5171 + do { /* PaX: patched PLT emulation #1 */
5172 + unsigned int sethi1, sethi2, jmpl;
5173 +
5174 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5175 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5176 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5177 +
5178 + if (err)
5179 + break;
5180 +
5181 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5182 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5183 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5184 + {
5185 + unsigned long addr;
5186 +
5187 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5188 + addr = regs->u_regs[UREG_G1];
5189 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5190 +
5191 + if (test_thread_flag(TIF_32BIT))
5192 + addr &= 0xFFFFFFFFUL;
5193 +
5194 + regs->tpc = addr;
5195 + regs->tnpc = addr+4;
5196 + return 2;
5197 + }
5198 + } while (0);
5199 +
5200 + { /* PaX: patched PLT emulation #2 */
5201 + unsigned int ba;
5202 +
5203 + err = get_user(ba, (unsigned int *)regs->tpc);
5204 +
5205 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5206 + unsigned long addr;
5207 +
5208 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5209 +
5210 + if (test_thread_flag(TIF_32BIT))
5211 + addr &= 0xFFFFFFFFUL;
5212 +
5213 + regs->tpc = addr;
5214 + regs->tnpc = addr+4;
5215 + return 2;
5216 + }
5217 + }
5218 +
5219 + do { /* PaX: patched PLT emulation #3 */
5220 + unsigned int sethi, jmpl, nop;
5221 +
5222 + err = get_user(sethi, (unsigned int *)regs->tpc);
5223 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5224 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5225 +
5226 + if (err)
5227 + break;
5228 +
5229 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5230 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5231 + nop == 0x01000000U)
5232 + {
5233 + unsigned long addr;
5234 +
5235 + addr = (sethi & 0x003FFFFFU) << 10;
5236 + regs->u_regs[UREG_G1] = addr;
5237 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5238 +
5239 + if (test_thread_flag(TIF_32BIT))
5240 + addr &= 0xFFFFFFFFUL;
5241 +
5242 + regs->tpc = addr;
5243 + regs->tnpc = addr+4;
5244 + return 2;
5245 + }
5246 + } while (0);
5247 +
5248 + do { /* PaX: patched PLT emulation #4 */
5249 + unsigned int sethi, mov1, call, mov2;
5250 +
5251 + err = get_user(sethi, (unsigned int *)regs->tpc);
5252 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5253 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5254 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5255 +
5256 + if (err)
5257 + break;
5258 +
5259 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5260 + mov1 == 0x8210000FU &&
5261 + (call & 0xC0000000U) == 0x40000000U &&
5262 + mov2 == 0x9E100001U)
5263 + {
5264 + unsigned long addr;
5265 +
5266 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5267 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5268 +
5269 + if (test_thread_flag(TIF_32BIT))
5270 + addr &= 0xFFFFFFFFUL;
5271 +
5272 + regs->tpc = addr;
5273 + regs->tnpc = addr+4;
5274 + return 2;
5275 + }
5276 + } while (0);
5277 +
5278 + do { /* PaX: patched PLT emulation #5 */
5279 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5280 +
5281 + err = get_user(sethi, (unsigned int *)regs->tpc);
5282 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5283 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5284 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5285 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5286 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5287 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5288 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5289 +
5290 + if (err)
5291 + break;
5292 +
5293 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5294 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5295 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5296 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5297 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5298 + sllx == 0x83287020U &&
5299 + jmpl == 0x81C04005U &&
5300 + nop == 0x01000000U)
5301 + {
5302 + unsigned long addr;
5303 +
5304 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5305 + regs->u_regs[UREG_G1] <<= 32;
5306 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5307 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5308 + regs->tpc = addr;
5309 + regs->tnpc = addr+4;
5310 + return 2;
5311 + }
5312 + } while (0);
5313 +
5314 + do { /* PaX: patched PLT emulation #6 */
5315 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5316 +
5317 + err = get_user(sethi, (unsigned int *)regs->tpc);
5318 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5319 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5320 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5321 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5322 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5323 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5324 +
5325 + if (err)
5326 + break;
5327 +
5328 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5329 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5330 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5331 + sllx == 0x83287020U &&
5332 + (or & 0xFFFFE000U) == 0x8A116000U &&
5333 + jmpl == 0x81C04005U &&
5334 + nop == 0x01000000U)
5335 + {
5336 + unsigned long addr;
5337 +
5338 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5339 + regs->u_regs[UREG_G1] <<= 32;
5340 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5341 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5342 + regs->tpc = addr;
5343 + regs->tnpc = addr+4;
5344 + return 2;
5345 + }
5346 + } while (0);
5347 +
5348 + do { /* PaX: unpatched PLT emulation step 1 */
5349 + unsigned int sethi, ba, nop;
5350 +
5351 + err = get_user(sethi, (unsigned int *)regs->tpc);
5352 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5353 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5354 +
5355 + if (err)
5356 + break;
5357 +
5358 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5359 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5360 + nop == 0x01000000U)
5361 + {
5362 + unsigned long addr;
5363 + unsigned int save, call;
5364 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5365 +
5366 + if ((ba & 0xFFC00000U) == 0x30800000U)
5367 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5368 + else
5369 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5370 +
5371 + if (test_thread_flag(TIF_32BIT))
5372 + addr &= 0xFFFFFFFFUL;
5373 +
5374 + err = get_user(save, (unsigned int *)addr);
5375 + err |= get_user(call, (unsigned int *)(addr+4));
5376 + err |= get_user(nop, (unsigned int *)(addr+8));
5377 + if (err)
5378 + break;
5379 +
5380 +#ifdef CONFIG_PAX_DLRESOLVE
5381 + if (save == 0x9DE3BFA8U &&
5382 + (call & 0xC0000000U) == 0x40000000U &&
5383 + nop == 0x01000000U)
5384 + {
5385 + struct vm_area_struct *vma;
5386 + unsigned long call_dl_resolve;
5387 +
5388 + down_read(&current->mm->mmap_sem);
5389 + call_dl_resolve = current->mm->call_dl_resolve;
5390 + up_read(&current->mm->mmap_sem);
5391 + if (likely(call_dl_resolve))
5392 + goto emulate;
5393 +
5394 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5395 +
5396 + down_write(&current->mm->mmap_sem);
5397 + if (current->mm->call_dl_resolve) {
5398 + call_dl_resolve = current->mm->call_dl_resolve;
5399 + up_write(&current->mm->mmap_sem);
5400 + if (vma)
5401 + kmem_cache_free(vm_area_cachep, vma);
5402 + goto emulate;
5403 + }
5404 +
5405 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5406 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5407 + up_write(&current->mm->mmap_sem);
5408 + if (vma)
5409 + kmem_cache_free(vm_area_cachep, vma);
5410 + return 1;
5411 + }
5412 +
5413 + if (pax_insert_vma(vma, call_dl_resolve)) {
5414 + up_write(&current->mm->mmap_sem);
5415 + kmem_cache_free(vm_area_cachep, vma);
5416 + return 1;
5417 + }
5418 +
5419 + current->mm->call_dl_resolve = call_dl_resolve;
5420 + up_write(&current->mm->mmap_sem);
5421 +
5422 +emulate:
5423 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5424 + regs->tpc = call_dl_resolve;
5425 + regs->tnpc = addr+4;
5426 + return 3;
5427 + }
5428 +#endif
5429 +
5430 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5431 + if ((save & 0xFFC00000U) == 0x05000000U &&
5432 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5433 + nop == 0x01000000U)
5434 + {
5435 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5436 + regs->u_regs[UREG_G2] = addr + 4;
5437 + addr = (save & 0x003FFFFFU) << 10;
5438 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5439 +
5440 + if (test_thread_flag(TIF_32BIT))
5441 + addr &= 0xFFFFFFFFUL;
5442 +
5443 + regs->tpc = addr;
5444 + regs->tnpc = addr+4;
5445 + return 3;
5446 + }
5447 +
5448 + /* PaX: 64-bit PLT stub */
5449 + err = get_user(sethi1, (unsigned int *)addr);
5450 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5451 + err |= get_user(or1, (unsigned int *)(addr+8));
5452 + err |= get_user(or2, (unsigned int *)(addr+12));
5453 + err |= get_user(sllx, (unsigned int *)(addr+16));
5454 + err |= get_user(add, (unsigned int *)(addr+20));
5455 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5456 + err |= get_user(nop, (unsigned int *)(addr+28));
5457 + if (err)
5458 + break;
5459 +
5460 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5461 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5462 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5463 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5464 + sllx == 0x89293020U &&
5465 + add == 0x8A010005U &&
5466 + jmpl == 0x89C14000U &&
5467 + nop == 0x01000000U)
5468 + {
5469 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5470 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5471 + regs->u_regs[UREG_G4] <<= 32;
5472 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5473 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5474 + regs->u_regs[UREG_G4] = addr + 24;
5475 + addr = regs->u_regs[UREG_G5];
5476 + regs->tpc = addr;
5477 + regs->tnpc = addr+4;
5478 + return 3;
5479 + }
5480 + }
5481 + } while (0);
5482 +
5483 +#ifdef CONFIG_PAX_DLRESOLVE
5484 + do { /* PaX: unpatched PLT emulation step 2 */
5485 + unsigned int save, call, nop;
5486 +
5487 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5488 + err |= get_user(call, (unsigned int *)regs->tpc);
5489 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5490 + if (err)
5491 + break;
5492 +
5493 + if (save == 0x9DE3BFA8U &&
5494 + (call & 0xC0000000U) == 0x40000000U &&
5495 + nop == 0x01000000U)
5496 + {
5497 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5498 +
5499 + if (test_thread_flag(TIF_32BIT))
5500 + dl_resolve &= 0xFFFFFFFFUL;
5501 +
5502 + regs->u_regs[UREG_RETPC] = regs->tpc;
5503 + regs->tpc = dl_resolve;
5504 + regs->tnpc = dl_resolve+4;
5505 + return 3;
5506 + }
5507 + } while (0);
5508 +#endif
5509 +
5510 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5511 + unsigned int sethi, ba, nop;
5512 +
5513 + err = get_user(sethi, (unsigned int *)regs->tpc);
5514 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5515 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5516 +
5517 + if (err)
5518 + break;
5519 +
5520 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5521 + (ba & 0xFFF00000U) == 0x30600000U &&
5522 + nop == 0x01000000U)
5523 + {
5524 + unsigned long addr;
5525 +
5526 + addr = (sethi & 0x003FFFFFU) << 10;
5527 + regs->u_regs[UREG_G1] = addr;
5528 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5529 +
5530 + if (test_thread_flag(TIF_32BIT))
5531 + addr &= 0xFFFFFFFFUL;
5532 +
5533 + regs->tpc = addr;
5534 + regs->tnpc = addr+4;
5535 + return 2;
5536 + }
5537 + } while (0);
5538 +
5539 +#endif
5540 +
5541 + return 1;
5542 +}
5543 +
5544 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5545 +{
5546 + unsigned long i;
5547 +
5548 + printk(KERN_ERR "PAX: bytes at PC: ");
5549 + for (i = 0; i < 8; i++) {
5550 + unsigned int c;
5551 + if (get_user(c, (unsigned int *)pc+i))
5552 + printk(KERN_CONT "???????? ");
5553 + else
5554 + printk(KERN_CONT "%08x ", c);
5555 + }
5556 + printk("\n");
5557 +}
5558 +#endif
5559 +
5560 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5561 {
5562 struct mm_struct *mm = current->mm;
5563 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5564 if (!vma)
5565 goto bad_area;
5566
5567 +#ifdef CONFIG_PAX_PAGEEXEC
5568 + /* PaX: detect ITLB misses on non-exec pages */
5569 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5570 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5571 + {
5572 + if (address != regs->tpc)
5573 + goto good_area;
5574 +
5575 + up_read(&mm->mmap_sem);
5576 + switch (pax_handle_fetch_fault(regs)) {
5577 +
5578 +#ifdef CONFIG_PAX_EMUPLT
5579 + case 2:
5580 + case 3:
5581 + return;
5582 +#endif
5583 +
5584 + }
5585 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5586 + do_group_exit(SIGKILL);
5587 + }
5588 +#endif
5589 +
5590 /* Pure DTLB misses do not tell us whether the fault causing
5591 * load/store/atomic was a write or not, it only says that there
5592 * was no match. So in such a case we (carefully) read the
5593 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5594 index 07e1453..0a7d9e9 100644
5595 --- a/arch/sparc/mm/hugetlbpage.c
5596 +++ b/arch/sparc/mm/hugetlbpage.c
5597 @@ -67,7 +67,7 @@ full_search:
5598 }
5599 return -ENOMEM;
5600 }
5601 - if (likely(!vma || addr + len <= vma->vm_start)) {
5602 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5603 /*
5604 * Remember the place where we stopped the search:
5605 */
5606 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5607 /* make sure it can fit in the remaining address space */
5608 if (likely(addr > len)) {
5609 vma = find_vma(mm, addr-len);
5610 - if (!vma || addr <= vma->vm_start) {
5611 + if (check_heap_stack_gap(vma, addr - len, len)) {
5612 /* remember the address as a hint for next time */
5613 return (mm->free_area_cache = addr-len);
5614 }
5615 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5616 if (unlikely(mm->mmap_base < len))
5617 goto bottomup;
5618
5619 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5620 + addr = mm->mmap_base - len;
5621
5622 do {
5623 + addr &= HPAGE_MASK;
5624 /*
5625 * Lookup failure means no vma is above this address,
5626 * else if new region fits below vma->vm_start,
5627 * return with success:
5628 */
5629 vma = find_vma(mm, addr);
5630 - if (likely(!vma || addr+len <= vma->vm_start)) {
5631 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5632 /* remember the address as a hint for next time */
5633 return (mm->free_area_cache = addr);
5634 }
5635 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5636 mm->cached_hole_size = vma->vm_start - addr;
5637
5638 /* try just below the current vma->vm_start */
5639 - addr = (vma->vm_start-len) & HPAGE_MASK;
5640 - } while (likely(len < vma->vm_start));
5641 + addr = skip_heap_stack_gap(vma, len);
5642 + } while (!IS_ERR_VALUE(addr));
5643
5644 bottomup:
5645 /*
5646 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5647 if (addr) {
5648 addr = ALIGN(addr, HPAGE_SIZE);
5649 vma = find_vma(mm, addr);
5650 - if (task_size - len >= addr &&
5651 - (!vma || addr + len <= vma->vm_start))
5652 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5653 return addr;
5654 }
5655 if (mm->get_unmapped_area == arch_get_unmapped_area)
5656 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5657 index 7b00de6..78239f4 100644
5658 --- a/arch/sparc/mm/init_32.c
5659 +++ b/arch/sparc/mm/init_32.c
5660 @@ -316,6 +316,9 @@ extern void device_scan(void);
5661 pgprot_t PAGE_SHARED __read_mostly;
5662 EXPORT_SYMBOL(PAGE_SHARED);
5663
5664 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5665 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5666 +
5667 void __init paging_init(void)
5668 {
5669 switch(sparc_cpu_model) {
5670 @@ -344,17 +347,17 @@ void __init paging_init(void)
5671
5672 /* Initialize the protection map with non-constant, MMU dependent values. */
5673 protection_map[0] = PAGE_NONE;
5674 - protection_map[1] = PAGE_READONLY;
5675 - protection_map[2] = PAGE_COPY;
5676 - protection_map[3] = PAGE_COPY;
5677 + protection_map[1] = PAGE_READONLY_NOEXEC;
5678 + protection_map[2] = PAGE_COPY_NOEXEC;
5679 + protection_map[3] = PAGE_COPY_NOEXEC;
5680 protection_map[4] = PAGE_READONLY;
5681 protection_map[5] = PAGE_READONLY;
5682 protection_map[6] = PAGE_COPY;
5683 protection_map[7] = PAGE_COPY;
5684 protection_map[8] = PAGE_NONE;
5685 - protection_map[9] = PAGE_READONLY;
5686 - protection_map[10] = PAGE_SHARED;
5687 - protection_map[11] = PAGE_SHARED;
5688 + protection_map[9] = PAGE_READONLY_NOEXEC;
5689 + protection_map[10] = PAGE_SHARED_NOEXEC;
5690 + protection_map[11] = PAGE_SHARED_NOEXEC;
5691 protection_map[12] = PAGE_READONLY;
5692 protection_map[13] = PAGE_READONLY;
5693 protection_map[14] = PAGE_SHARED;
5694 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5695 index cbef74e..c38fead 100644
5696 --- a/arch/sparc/mm/srmmu.c
5697 +++ b/arch/sparc/mm/srmmu.c
5698 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5699 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5700 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5701 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5702 +
5703 +#ifdef CONFIG_PAX_PAGEEXEC
5704 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5705 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5706 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5707 +#endif
5708 +
5709 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5710 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5711
5712 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
5713 index 27fe667..36d474c 100644
5714 --- a/arch/tile/include/asm/atomic_64.h
5715 +++ b/arch/tile/include/asm/atomic_64.h
5716 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5717
5718 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5719
5720 +#define atomic64_read_unchecked(v) atomic64_read(v)
5721 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5722 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5723 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5724 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5725 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5726 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5727 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5728 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5729 +
5730 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
5731 #define smp_mb__before_atomic_dec() smp_mb()
5732 #define smp_mb__after_atomic_dec() smp_mb()
5733 diff --git a/arch/um/Makefile b/arch/um/Makefile
5734 index 7730af6..cce5b19 100644
5735 --- a/arch/um/Makefile
5736 +++ b/arch/um/Makefile
5737 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5738 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5739 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5740
5741 +ifdef CONSTIFY_PLUGIN
5742 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5743 +endif
5744 +
5745 #This will adjust *FLAGS accordingly to the platform.
5746 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5747
5748 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5749 index 6c03acd..a5e0215 100644
5750 --- a/arch/um/include/asm/kmap_types.h
5751 +++ b/arch/um/include/asm/kmap_types.h
5752 @@ -23,6 +23,7 @@ enum km_type {
5753 KM_IRQ1,
5754 KM_SOFTIRQ0,
5755 KM_SOFTIRQ1,
5756 + KM_CLEARPAGE,
5757 KM_TYPE_NR
5758 };
5759
5760 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5761 index 7cfc3ce..cbd1a58 100644
5762 --- a/arch/um/include/asm/page.h
5763 +++ b/arch/um/include/asm/page.h
5764 @@ -14,6 +14,9 @@
5765 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5766 #define PAGE_MASK (~(PAGE_SIZE-1))
5767
5768 +#define ktla_ktva(addr) (addr)
5769 +#define ktva_ktla(addr) (addr)
5770 +
5771 #ifndef __ASSEMBLY__
5772
5773 struct page;
5774 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5775 index c533835..84db18e 100644
5776 --- a/arch/um/kernel/process.c
5777 +++ b/arch/um/kernel/process.c
5778 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5779 return 2;
5780 }
5781
5782 -/*
5783 - * Only x86 and x86_64 have an arch_align_stack().
5784 - * All other arches have "#define arch_align_stack(x) (x)"
5785 - * in their asm/system.h
5786 - * As this is included in UML from asm-um/system-generic.h,
5787 - * we can use it to behave as the subarch does.
5788 - */
5789 -#ifndef arch_align_stack
5790 -unsigned long arch_align_stack(unsigned long sp)
5791 -{
5792 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5793 - sp -= get_random_int() % 8192;
5794 - return sp & ~0xf;
5795 -}
5796 -#endif
5797 -
5798 unsigned long get_wchan(struct task_struct *p)
5799 {
5800 unsigned long stack_page, sp, ip;
5801 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5802 index efb4294..61bc18c 100644
5803 --- a/arch/x86/Kconfig
5804 +++ b/arch/x86/Kconfig
5805 @@ -235,7 +235,7 @@ config X86_HT
5806
5807 config X86_32_LAZY_GS
5808 def_bool y
5809 - depends on X86_32 && !CC_STACKPROTECTOR
5810 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5811
5812 config ARCH_HWEIGHT_CFLAGS
5813 string
5814 @@ -1022,7 +1022,7 @@ choice
5815
5816 config NOHIGHMEM
5817 bool "off"
5818 - depends on !X86_NUMAQ
5819 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5820 ---help---
5821 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5822 However, the address space of 32-bit x86 processors is only 4
5823 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5824
5825 config HIGHMEM4G
5826 bool "4GB"
5827 - depends on !X86_NUMAQ
5828 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5829 ---help---
5830 Select this if you have a 32-bit processor and between 1 and 4
5831 gigabytes of physical RAM.
5832 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5833 hex
5834 default 0xB0000000 if VMSPLIT_3G_OPT
5835 default 0x80000000 if VMSPLIT_2G
5836 - default 0x78000000 if VMSPLIT_2G_OPT
5837 + default 0x70000000 if VMSPLIT_2G_OPT
5838 default 0x40000000 if VMSPLIT_1G
5839 default 0xC0000000
5840 depends on X86_32
5841 @@ -1496,6 +1496,7 @@ config SECCOMP
5842
5843 config CC_STACKPROTECTOR
5844 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5845 + depends on X86_64 || !PAX_MEMORY_UDEREF
5846 ---help---
5847 This option turns on the -fstack-protector GCC feature. This
5848 feature puts, at the beginning of functions, a canary value on
5849 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5850 config PHYSICAL_START
5851 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5852 default "0x1000000"
5853 + range 0x400000 0x40000000
5854 ---help---
5855 This gives the physical address where the kernel is loaded.
5856
5857 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5858 config PHYSICAL_ALIGN
5859 hex "Alignment value to which kernel should be aligned" if X86_32
5860 default "0x1000000"
5861 + range 0x400000 0x1000000 if PAX_KERNEXEC
5862 range 0x2000 0x1000000
5863 ---help---
5864 This value puts the alignment restrictions on physical address
5865 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5866 Say N if you want to disable CPU hotplug.
5867
5868 config COMPAT_VDSO
5869 - def_bool y
5870 + def_bool n
5871 prompt "Compat VDSO support"
5872 depends on X86_32 || IA32_EMULATION
5873 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5874 ---help---
5875 Map the 32-bit VDSO to the predictable old-style address too.
5876
5877 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5878 index e3ca7e0..b30b28a 100644
5879 --- a/arch/x86/Kconfig.cpu
5880 +++ b/arch/x86/Kconfig.cpu
5881 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5882
5883 config X86_F00F_BUG
5884 def_bool y
5885 - depends on M586MMX || M586TSC || M586 || M486 || M386
5886 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5887
5888 config X86_INVD_BUG
5889 def_bool y
5890 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5891
5892 config X86_ALIGNMENT_16
5893 def_bool y
5894 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5895 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5896
5897 config X86_INTEL_USERCOPY
5898 def_bool y
5899 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5900 # generates cmov.
5901 config X86_CMOV
5902 def_bool y
5903 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5904 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5905
5906 config X86_MINIMUM_CPU_FAMILY
5907 int
5908 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5909 index bf56e17..05f9891 100644
5910 --- a/arch/x86/Kconfig.debug
5911 +++ b/arch/x86/Kconfig.debug
5912 @@ -81,7 +81,7 @@ config X86_PTDUMP
5913 config DEBUG_RODATA
5914 bool "Write protect kernel read-only data structures"
5915 default y
5916 - depends on DEBUG_KERNEL
5917 + depends on DEBUG_KERNEL && BROKEN
5918 ---help---
5919 Mark the kernel read-only data as write-protected in the pagetables,
5920 in order to catch accidental (and incorrect) writes to such const
5921 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5922
5923 config DEBUG_SET_MODULE_RONX
5924 bool "Set loadable kernel module data as NX and text as RO"
5925 - depends on MODULES
5926 + depends on MODULES && BROKEN
5927 ---help---
5928 This option helps catch unintended modifications to loadable
5929 kernel module's text and read-only data. It also prevents execution
5930 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5931 index b02e509..2631e48 100644
5932 --- a/arch/x86/Makefile
5933 +++ b/arch/x86/Makefile
5934 @@ -46,6 +46,7 @@ else
5935 UTS_MACHINE := x86_64
5936 CHECKFLAGS += -D__x86_64__ -m64
5937
5938 + biarch := $(call cc-option,-m64)
5939 KBUILD_AFLAGS += -m64
5940 KBUILD_CFLAGS += -m64
5941
5942 @@ -195,3 +196,12 @@ define archhelp
5943 echo ' FDARGS="..." arguments for the booted kernel'
5944 echo ' FDINITRD=file initrd for the booted kernel'
5945 endef
5946 +
5947 +define OLD_LD
5948 +
5949 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5950 +*** Please upgrade your binutils to 2.18 or newer
5951 +endef
5952 +
5953 +archprepare:
5954 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5955 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5956 index 95365a8..52f857b 100644
5957 --- a/arch/x86/boot/Makefile
5958 +++ b/arch/x86/boot/Makefile
5959 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5960 $(call cc-option, -fno-stack-protector) \
5961 $(call cc-option, -mpreferred-stack-boundary=2)
5962 KBUILD_CFLAGS += $(call cc-option, -m32)
5963 +ifdef CONSTIFY_PLUGIN
5964 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5965 +endif
5966 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5967 GCOV_PROFILE := n
5968
5969 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5970 index 878e4b9..20537ab 100644
5971 --- a/arch/x86/boot/bitops.h
5972 +++ b/arch/x86/boot/bitops.h
5973 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5974 u8 v;
5975 const u32 *p = (const u32 *)addr;
5976
5977 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5978 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5979 return v;
5980 }
5981
5982 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5983
5984 static inline void set_bit(int nr, void *addr)
5985 {
5986 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5987 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5988 }
5989
5990 #endif /* BOOT_BITOPS_H */
5991 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5992 index c7093bd..d4247ffe0 100644
5993 --- a/arch/x86/boot/boot.h
5994 +++ b/arch/x86/boot/boot.h
5995 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5996 static inline u16 ds(void)
5997 {
5998 u16 seg;
5999 - asm("movw %%ds,%0" : "=rm" (seg));
6000 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6001 return seg;
6002 }
6003
6004 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
6005 static inline int memcmp(const void *s1, const void *s2, size_t len)
6006 {
6007 u8 diff;
6008 - asm("repe; cmpsb; setnz %0"
6009 + asm volatile("repe; cmpsb; setnz %0"
6010 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6011 return diff;
6012 }
6013 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
6014 index 09664ef..edc5d03 100644
6015 --- a/arch/x86/boot/compressed/Makefile
6016 +++ b/arch/x86/boot/compressed/Makefile
6017 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
6018 KBUILD_CFLAGS += $(cflags-y)
6019 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6020 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6021 +ifdef CONSTIFY_PLUGIN
6022 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6023 +endif
6024
6025 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6026 GCOV_PROFILE := n
6027 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
6028 index 67a655a..b924059 100644
6029 --- a/arch/x86/boot/compressed/head_32.S
6030 +++ b/arch/x86/boot/compressed/head_32.S
6031 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6032 notl %eax
6033 andl %eax, %ebx
6034 #else
6035 - movl $LOAD_PHYSICAL_ADDR, %ebx
6036 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6037 #endif
6038
6039 /* Target address to relocate to for decompression */
6040 @@ -162,7 +162,7 @@ relocated:
6041 * and where it was actually loaded.
6042 */
6043 movl %ebp, %ebx
6044 - subl $LOAD_PHYSICAL_ADDR, %ebx
6045 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6046 jz 2f /* Nothing to be done if loaded at compiled addr. */
6047 /*
6048 * Process relocations.
6049 @@ -170,8 +170,7 @@ relocated:
6050
6051 1: subl $4, %edi
6052 movl (%edi), %ecx
6053 - testl %ecx, %ecx
6054 - jz 2f
6055 + jecxz 2f
6056 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6057 jmp 1b
6058 2:
6059 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
6060 index 35af09d..99c9676 100644
6061 --- a/arch/x86/boot/compressed/head_64.S
6062 +++ b/arch/x86/boot/compressed/head_64.S
6063 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6064 notl %eax
6065 andl %eax, %ebx
6066 #else
6067 - movl $LOAD_PHYSICAL_ADDR, %ebx
6068 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6069 #endif
6070
6071 /* Target address to relocate to for decompression */
6072 @@ -233,7 +233,7 @@ ENTRY(startup_64)
6073 notq %rax
6074 andq %rax, %rbp
6075 #else
6076 - movq $LOAD_PHYSICAL_ADDR, %rbp
6077 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6078 #endif
6079
6080 /* Target address to relocate to for decompression */
6081 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
6082 index 3a19d04..7c1d55a 100644
6083 --- a/arch/x86/boot/compressed/misc.c
6084 +++ b/arch/x86/boot/compressed/misc.c
6085 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
6086 case PT_LOAD:
6087 #ifdef CONFIG_RELOCATABLE
6088 dest = output;
6089 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6090 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6091 #else
6092 dest = (void *)(phdr->p_paddr);
6093 #endif
6094 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
6095 error("Destination address too large");
6096 #endif
6097 #ifndef CONFIG_RELOCATABLE
6098 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6099 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6100 error("Wrong destination address");
6101 #endif
6102
6103 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
6104 index 89bbf4e..869908e 100644
6105 --- a/arch/x86/boot/compressed/relocs.c
6106 +++ b/arch/x86/boot/compressed/relocs.c
6107 @@ -13,8 +13,11 @@
6108
6109 static void die(char *fmt, ...);
6110
6111 +#include "../../../../include/generated/autoconf.h"
6112 +
6113 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6114 static Elf32_Ehdr ehdr;
6115 +static Elf32_Phdr *phdr;
6116 static unsigned long reloc_count, reloc_idx;
6117 static unsigned long *relocs;
6118
6119 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
6120 }
6121 }
6122
6123 +static void read_phdrs(FILE *fp)
6124 +{
6125 + unsigned int i;
6126 +
6127 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6128 + if (!phdr) {
6129 + die("Unable to allocate %d program headers\n",
6130 + ehdr.e_phnum);
6131 + }
6132 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6133 + die("Seek to %d failed: %s\n",
6134 + ehdr.e_phoff, strerror(errno));
6135 + }
6136 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6137 + die("Cannot read ELF program headers: %s\n",
6138 + strerror(errno));
6139 + }
6140 + for(i = 0; i < ehdr.e_phnum; i++) {
6141 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6142 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6143 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6144 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6145 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6146 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6147 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6148 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6149 + }
6150 +
6151 +}
6152 +
6153 static void read_shdrs(FILE *fp)
6154 {
6155 - int i;
6156 + unsigned int i;
6157 Elf32_Shdr shdr;
6158
6159 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6160 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
6161
6162 static void read_strtabs(FILE *fp)
6163 {
6164 - int i;
6165 + unsigned int i;
6166 for (i = 0; i < ehdr.e_shnum; i++) {
6167 struct section *sec = &secs[i];
6168 if (sec->shdr.sh_type != SHT_STRTAB) {
6169 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
6170
6171 static void read_symtabs(FILE *fp)
6172 {
6173 - int i,j;
6174 + unsigned int i,j;
6175 for (i = 0; i < ehdr.e_shnum; i++) {
6176 struct section *sec = &secs[i];
6177 if (sec->shdr.sh_type != SHT_SYMTAB) {
6178 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
6179
6180 static void read_relocs(FILE *fp)
6181 {
6182 - int i,j;
6183 + unsigned int i,j;
6184 + uint32_t base;
6185 +
6186 for (i = 0; i < ehdr.e_shnum; i++) {
6187 struct section *sec = &secs[i];
6188 if (sec->shdr.sh_type != SHT_REL) {
6189 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6190 die("Cannot read symbol table: %s\n",
6191 strerror(errno));
6192 }
6193 + base = 0;
6194 + for (j = 0; j < ehdr.e_phnum; j++) {
6195 + if (phdr[j].p_type != PT_LOAD )
6196 + continue;
6197 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6198 + continue;
6199 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6200 + break;
6201 + }
6202 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6203 Elf32_Rel *rel = &sec->reltab[j];
6204 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6205 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6206 rel->r_info = elf32_to_cpu(rel->r_info);
6207 }
6208 }
6209 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6210
6211 static void print_absolute_symbols(void)
6212 {
6213 - int i;
6214 + unsigned int i;
6215 printf("Absolute symbols\n");
6216 printf(" Num: Value Size Type Bind Visibility Name\n");
6217 for (i = 0; i < ehdr.e_shnum; i++) {
6218 struct section *sec = &secs[i];
6219 char *sym_strtab;
6220 Elf32_Sym *sh_symtab;
6221 - int j;
6222 + unsigned int j;
6223
6224 if (sec->shdr.sh_type != SHT_SYMTAB) {
6225 continue;
6226 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6227
6228 static void print_absolute_relocs(void)
6229 {
6230 - int i, printed = 0;
6231 + unsigned int i, printed = 0;
6232
6233 for (i = 0; i < ehdr.e_shnum; i++) {
6234 struct section *sec = &secs[i];
6235 struct section *sec_applies, *sec_symtab;
6236 char *sym_strtab;
6237 Elf32_Sym *sh_symtab;
6238 - int j;
6239 + unsigned int j;
6240 if (sec->shdr.sh_type != SHT_REL) {
6241 continue;
6242 }
6243 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6244
6245 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6246 {
6247 - int i;
6248 + unsigned int i;
6249 /* Walk through the relocations */
6250 for (i = 0; i < ehdr.e_shnum; i++) {
6251 char *sym_strtab;
6252 Elf32_Sym *sh_symtab;
6253 struct section *sec_applies, *sec_symtab;
6254 - int j;
6255 + unsigned int j;
6256 struct section *sec = &secs[i];
6257
6258 if (sec->shdr.sh_type != SHT_REL) {
6259 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6260 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6261 continue;
6262 }
6263 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6264 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6265 + continue;
6266 +
6267 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6268 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6269 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6270 + continue;
6271 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6272 + continue;
6273 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6274 + continue;
6275 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6276 + continue;
6277 +#endif
6278 +
6279 switch (r_type) {
6280 case R_386_NONE:
6281 case R_386_PC32:
6282 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6283
6284 static void emit_relocs(int as_text)
6285 {
6286 - int i;
6287 + unsigned int i;
6288 /* Count how many relocations I have and allocate space for them. */
6289 reloc_count = 0;
6290 walk_relocs(count_reloc);
6291 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6292 fname, strerror(errno));
6293 }
6294 read_ehdr(fp);
6295 + read_phdrs(fp);
6296 read_shdrs(fp);
6297 read_strtabs(fp);
6298 read_symtabs(fp);
6299 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6300 index 4d3ff03..e4972ff 100644
6301 --- a/arch/x86/boot/cpucheck.c
6302 +++ b/arch/x86/boot/cpucheck.c
6303 @@ -74,7 +74,7 @@ static int has_fpu(void)
6304 u16 fcw = -1, fsw = -1;
6305 u32 cr0;
6306
6307 - asm("movl %%cr0,%0" : "=r" (cr0));
6308 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6309 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6310 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6311 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6312 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6313 {
6314 u32 f0, f1;
6315
6316 - asm("pushfl ; "
6317 + asm volatile("pushfl ; "
6318 "pushfl ; "
6319 "popl %0 ; "
6320 "movl %0,%1 ; "
6321 @@ -115,7 +115,7 @@ static void get_flags(void)
6322 set_bit(X86_FEATURE_FPU, cpu.flags);
6323
6324 if (has_eflag(X86_EFLAGS_ID)) {
6325 - asm("cpuid"
6326 + asm volatile("cpuid"
6327 : "=a" (max_intel_level),
6328 "=b" (cpu_vendor[0]),
6329 "=d" (cpu_vendor[1]),
6330 @@ -124,7 +124,7 @@ static void get_flags(void)
6331
6332 if (max_intel_level >= 0x00000001 &&
6333 max_intel_level <= 0x0000ffff) {
6334 - asm("cpuid"
6335 + asm volatile("cpuid"
6336 : "=a" (tfms),
6337 "=c" (cpu.flags[4]),
6338 "=d" (cpu.flags[0])
6339 @@ -136,7 +136,7 @@ static void get_flags(void)
6340 cpu.model += ((tfms >> 16) & 0xf) << 4;
6341 }
6342
6343 - asm("cpuid"
6344 + asm volatile("cpuid"
6345 : "=a" (max_amd_level)
6346 : "a" (0x80000000)
6347 : "ebx", "ecx", "edx");
6348 @@ -144,7 +144,7 @@ static void get_flags(void)
6349 if (max_amd_level >= 0x80000001 &&
6350 max_amd_level <= 0x8000ffff) {
6351 u32 eax = 0x80000001;
6352 - asm("cpuid"
6353 + asm volatile("cpuid"
6354 : "+a" (eax),
6355 "=c" (cpu.flags[6]),
6356 "=d" (cpu.flags[1])
6357 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6358 u32 ecx = MSR_K7_HWCR;
6359 u32 eax, edx;
6360
6361 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6362 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6363 eax &= ~(1 << 15);
6364 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6365 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6366
6367 get_flags(); /* Make sure it really did something */
6368 err = check_flags();
6369 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6370 u32 ecx = MSR_VIA_FCR;
6371 u32 eax, edx;
6372
6373 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6374 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6375 eax |= (1<<1)|(1<<7);
6376 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6377 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6378
6379 set_bit(X86_FEATURE_CX8, cpu.flags);
6380 err = check_flags();
6381 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6382 u32 eax, edx;
6383 u32 level = 1;
6384
6385 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6386 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6387 - asm("cpuid"
6388 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6389 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6390 + asm volatile("cpuid"
6391 : "+a" (level), "=d" (cpu.flags[0])
6392 : : "ecx", "ebx");
6393 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6394 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6395
6396 err = check_flags();
6397 }
6398 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6399 index bdb4d45..0476680 100644
6400 --- a/arch/x86/boot/header.S
6401 +++ b/arch/x86/boot/header.S
6402 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6403 # single linked list of
6404 # struct setup_data
6405
6406 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6407 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6408
6409 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6410 #define VO_INIT_SIZE (VO__end - VO__text)
6411 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6412 index db75d07..8e6d0af 100644
6413 --- a/arch/x86/boot/memory.c
6414 +++ b/arch/x86/boot/memory.c
6415 @@ -19,7 +19,7 @@
6416
6417 static int detect_memory_e820(void)
6418 {
6419 - int count = 0;
6420 + unsigned int count = 0;
6421 struct biosregs ireg, oreg;
6422 struct e820entry *desc = boot_params.e820_map;
6423 static struct e820entry buf; /* static so it is zeroed */
6424 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6425 index 11e8c6e..fdbb1ed 100644
6426 --- a/arch/x86/boot/video-vesa.c
6427 +++ b/arch/x86/boot/video-vesa.c
6428 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6429
6430 boot_params.screen_info.vesapm_seg = oreg.es;
6431 boot_params.screen_info.vesapm_off = oreg.di;
6432 + boot_params.screen_info.vesapm_size = oreg.cx;
6433 }
6434
6435 /*
6436 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6437 index 43eda28..5ab5fdb 100644
6438 --- a/arch/x86/boot/video.c
6439 +++ b/arch/x86/boot/video.c
6440 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6441 static unsigned int get_entry(void)
6442 {
6443 char entry_buf[4];
6444 - int i, len = 0;
6445 + unsigned int i, len = 0;
6446 int key;
6447 unsigned int v;
6448
6449 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6450 index 5b577d5..3c1fed4 100644
6451 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6452 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6453 @@ -8,6 +8,8 @@
6454 * including this sentence is retained in full.
6455 */
6456
6457 +#include <asm/alternative-asm.h>
6458 +
6459 .extern crypto_ft_tab
6460 .extern crypto_it_tab
6461 .extern crypto_fl_tab
6462 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6463 je B192; \
6464 leaq 32(r9),r9;
6465
6466 +#define ret pax_force_retaddr 0, 1; ret
6467 +
6468 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6469 movq r1,r2; \
6470 movq r3,r4; \
6471 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6472 index be6d9e3..21fbbca 100644
6473 --- a/arch/x86/crypto/aesni-intel_asm.S
6474 +++ b/arch/x86/crypto/aesni-intel_asm.S
6475 @@ -31,6 +31,7 @@
6476
6477 #include <linux/linkage.h>
6478 #include <asm/inst.h>
6479 +#include <asm/alternative-asm.h>
6480
6481 #ifdef __x86_64__
6482 .data
6483 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6484 pop %r14
6485 pop %r13
6486 pop %r12
6487 + pax_force_retaddr 0, 1
6488 ret
6489 +ENDPROC(aesni_gcm_dec)
6490
6491
6492 /*****************************************************************************
6493 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6494 pop %r14
6495 pop %r13
6496 pop %r12
6497 + pax_force_retaddr 0, 1
6498 ret
6499 +ENDPROC(aesni_gcm_enc)
6500
6501 #endif
6502
6503 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6504 pxor %xmm1, %xmm0
6505 movaps %xmm0, (TKEYP)
6506 add $0x10, TKEYP
6507 + pax_force_retaddr_bts
6508 ret
6509
6510 .align 4
6511 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6512 shufps $0b01001110, %xmm2, %xmm1
6513 movaps %xmm1, 0x10(TKEYP)
6514 add $0x20, TKEYP
6515 + pax_force_retaddr_bts
6516 ret
6517
6518 .align 4
6519 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6520
6521 movaps %xmm0, (TKEYP)
6522 add $0x10, TKEYP
6523 + pax_force_retaddr_bts
6524 ret
6525
6526 .align 4
6527 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6528 pxor %xmm1, %xmm2
6529 movaps %xmm2, (TKEYP)
6530 add $0x10, TKEYP
6531 + pax_force_retaddr_bts
6532 ret
6533
6534 /*
6535 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6536 #ifndef __x86_64__
6537 popl KEYP
6538 #endif
6539 + pax_force_retaddr 0, 1
6540 ret
6541 +ENDPROC(aesni_set_key)
6542
6543 /*
6544 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6545 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6546 popl KLEN
6547 popl KEYP
6548 #endif
6549 + pax_force_retaddr 0, 1
6550 ret
6551 +ENDPROC(aesni_enc)
6552
6553 /*
6554 * _aesni_enc1: internal ABI
6555 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6556 AESENC KEY STATE
6557 movaps 0x70(TKEYP), KEY
6558 AESENCLAST KEY STATE
6559 + pax_force_retaddr_bts
6560 ret
6561
6562 /*
6563 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6564 AESENCLAST KEY STATE2
6565 AESENCLAST KEY STATE3
6566 AESENCLAST KEY STATE4
6567 + pax_force_retaddr_bts
6568 ret
6569
6570 /*
6571 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6572 popl KLEN
6573 popl KEYP
6574 #endif
6575 + pax_force_retaddr 0, 1
6576 ret
6577 +ENDPROC(aesni_dec)
6578
6579 /*
6580 * _aesni_dec1: internal ABI
6581 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6582 AESDEC KEY STATE
6583 movaps 0x70(TKEYP), KEY
6584 AESDECLAST KEY STATE
6585 + pax_force_retaddr_bts
6586 ret
6587
6588 /*
6589 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6590 AESDECLAST KEY STATE2
6591 AESDECLAST KEY STATE3
6592 AESDECLAST KEY STATE4
6593 + pax_force_retaddr_bts
6594 ret
6595
6596 /*
6597 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6598 popl KEYP
6599 popl LEN
6600 #endif
6601 + pax_force_retaddr 0, 1
6602 ret
6603 +ENDPROC(aesni_ecb_enc)
6604
6605 /*
6606 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6607 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6608 popl KEYP
6609 popl LEN
6610 #endif
6611 + pax_force_retaddr 0, 1
6612 ret
6613 +ENDPROC(aesni_ecb_dec)
6614
6615 /*
6616 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6617 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6618 popl LEN
6619 popl IVP
6620 #endif
6621 + pax_force_retaddr 0, 1
6622 ret
6623 +ENDPROC(aesni_cbc_enc)
6624
6625 /*
6626 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6627 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6628 popl LEN
6629 popl IVP
6630 #endif
6631 + pax_force_retaddr 0, 1
6632 ret
6633 +ENDPROC(aesni_cbc_dec)
6634
6635 #ifdef __x86_64__
6636 .align 16
6637 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6638 mov $1, TCTR_LOW
6639 MOVQ_R64_XMM TCTR_LOW INC
6640 MOVQ_R64_XMM CTR TCTR_LOW
6641 + pax_force_retaddr_bts
6642 ret
6643
6644 /*
6645 @@ -2552,6 +2580,7 @@ _aesni_inc:
6646 .Linc_low:
6647 movaps CTR, IV
6648 PSHUFB_XMM BSWAP_MASK IV
6649 + pax_force_retaddr_bts
6650 ret
6651
6652 /*
6653 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6654 .Lctr_enc_ret:
6655 movups IV, (IVP)
6656 .Lctr_enc_just_ret:
6657 + pax_force_retaddr 0, 1
6658 ret
6659 +ENDPROC(aesni_ctr_enc)
6660 #endif
6661 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6662 index 391d245..67f35c2 100644
6663 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6664 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6665 @@ -20,6 +20,8 @@
6666 *
6667 */
6668
6669 +#include <asm/alternative-asm.h>
6670 +
6671 .file "blowfish-x86_64-asm.S"
6672 .text
6673
6674 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6675 jnz __enc_xor;
6676
6677 write_block();
6678 + pax_force_retaddr 0, 1
6679 ret;
6680 __enc_xor:
6681 xor_block();
6682 + pax_force_retaddr 0, 1
6683 ret;
6684
6685 .align 8
6686 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6687
6688 movq %r11, %rbp;
6689
6690 + pax_force_retaddr 0, 1
6691 ret;
6692
6693 /**********************************************************************
6694 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6695
6696 popq %rbx;
6697 popq %rbp;
6698 + pax_force_retaddr 0, 1
6699 ret;
6700
6701 __enc_xor4:
6702 @@ -349,6 +355,7 @@ __enc_xor4:
6703
6704 popq %rbx;
6705 popq %rbp;
6706 + pax_force_retaddr 0, 1
6707 ret;
6708
6709 .align 8
6710 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6711 popq %rbx;
6712 popq %rbp;
6713
6714 + pax_force_retaddr 0, 1
6715 ret;
6716
6717 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6718 index 6214a9b..1f4fc9a 100644
6719 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6720 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6721 @@ -1,3 +1,5 @@
6722 +#include <asm/alternative-asm.h>
6723 +
6724 # enter ECRYPT_encrypt_bytes
6725 .text
6726 .p2align 5
6727 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6728 add %r11,%rsp
6729 mov %rdi,%rax
6730 mov %rsi,%rdx
6731 + pax_force_retaddr 0, 1
6732 ret
6733 # bytesatleast65:
6734 ._bytesatleast65:
6735 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6736 add %r11,%rsp
6737 mov %rdi,%rax
6738 mov %rsi,%rdx
6739 + pax_force_retaddr
6740 ret
6741 # enter ECRYPT_ivsetup
6742 .text
6743 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6744 add %r11,%rsp
6745 mov %rdi,%rax
6746 mov %rsi,%rdx
6747 + pax_force_retaddr
6748 ret
6749 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6750 index b2c2f57..8470cab 100644
6751 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6752 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6753 @@ -28,6 +28,8 @@
6754 * (at your option) any later version.
6755 */
6756
6757 +#include <asm/alternative-asm.h>
6758 +
6759 #define CTX %rdi // arg1
6760 #define BUF %rsi // arg2
6761 #define CNT %rdx // arg3
6762 @@ -104,6 +106,7 @@
6763 pop %r12
6764 pop %rbp
6765 pop %rbx
6766 + pax_force_retaddr 0, 1
6767 ret
6768
6769 .size \name, .-\name
6770 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6771 index 5b012a2..36d5364 100644
6772 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6773 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6774 @@ -20,6 +20,8 @@
6775 *
6776 */
6777
6778 +#include <asm/alternative-asm.h>
6779 +
6780 .file "twofish-x86_64-asm-3way.S"
6781 .text
6782
6783 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6784 popq %r13;
6785 popq %r14;
6786 popq %r15;
6787 + pax_force_retaddr 0, 1
6788 ret;
6789
6790 __enc_xor3:
6791 @@ -271,6 +274,7 @@ __enc_xor3:
6792 popq %r13;
6793 popq %r14;
6794 popq %r15;
6795 + pax_force_retaddr 0, 1
6796 ret;
6797
6798 .global twofish_dec_blk_3way
6799 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6800 popq %r13;
6801 popq %r14;
6802 popq %r15;
6803 + pax_force_retaddr 0, 1
6804 ret;
6805
6806 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6807 index 7bcf3fc..f53832f 100644
6808 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6809 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6810 @@ -21,6 +21,7 @@
6811 .text
6812
6813 #include <asm/asm-offsets.h>
6814 +#include <asm/alternative-asm.h>
6815
6816 #define a_offset 0
6817 #define b_offset 4
6818 @@ -268,6 +269,7 @@ twofish_enc_blk:
6819
6820 popq R1
6821 movq $1,%rax
6822 + pax_force_retaddr 0, 1
6823 ret
6824
6825 twofish_dec_blk:
6826 @@ -319,4 +321,5 @@ twofish_dec_blk:
6827
6828 popq R1
6829 movq $1,%rax
6830 + pax_force_retaddr 0, 1
6831 ret
6832 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6833 index fd84387..0b4af7d 100644
6834 --- a/arch/x86/ia32/ia32_aout.c
6835 +++ b/arch/x86/ia32/ia32_aout.c
6836 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6837 unsigned long dump_start, dump_size;
6838 struct user32 dump;
6839
6840 + memset(&dump, 0, sizeof(dump));
6841 +
6842 fs = get_fs();
6843 set_fs(KERNEL_DS);
6844 has_dumped = 1;
6845 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6846 index 6557769..ef6ae89 100644
6847 --- a/arch/x86/ia32/ia32_signal.c
6848 +++ b/arch/x86/ia32/ia32_signal.c
6849 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6850 }
6851 seg = get_fs();
6852 set_fs(KERNEL_DS);
6853 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6854 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6855 set_fs(seg);
6856 if (ret >= 0 && uoss_ptr) {
6857 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6858 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6859 */
6860 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6861 size_t frame_size,
6862 - void **fpstate)
6863 + void __user **fpstate)
6864 {
6865 unsigned long sp;
6866
6867 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6868
6869 if (used_math()) {
6870 sp = sp - sig_xstate_ia32_size;
6871 - *fpstate = (struct _fpstate_ia32 *) sp;
6872 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6873 if (save_i387_xstate_ia32(*fpstate) < 0)
6874 return (void __user *) -1L;
6875 }
6876 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6877 sp -= frame_size;
6878 /* Align the stack pointer according to the i386 ABI,
6879 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6880 - sp = ((sp + 4) & -16ul) - 4;
6881 + sp = ((sp - 12) & -16ul) - 4;
6882 return (void __user *) sp;
6883 }
6884
6885 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6886 * These are actually not used anymore, but left because some
6887 * gdb versions depend on them as a marker.
6888 */
6889 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6890 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6891 } put_user_catch(err);
6892
6893 if (err)
6894 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6895 0xb8,
6896 __NR_ia32_rt_sigreturn,
6897 0x80cd,
6898 - 0,
6899 + 0
6900 };
6901
6902 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6903 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6904
6905 if (ka->sa.sa_flags & SA_RESTORER)
6906 restorer = ka->sa.sa_restorer;
6907 + else if (current->mm->context.vdso)
6908 + /* Return stub is in 32bit vsyscall page */
6909 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6910 else
6911 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6912 - rt_sigreturn);
6913 + restorer = &frame->retcode;
6914 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6915
6916 /*
6917 * Not actually used anymore, but left because some gdb
6918 * versions need it.
6919 */
6920 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6921 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6922 } put_user_catch(err);
6923
6924 if (err)
6925 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6926 index a6253ec..4ad2120 100644
6927 --- a/arch/x86/ia32/ia32entry.S
6928 +++ b/arch/x86/ia32/ia32entry.S
6929 @@ -13,7 +13,9 @@
6930 #include <asm/thread_info.h>
6931 #include <asm/segment.h>
6932 #include <asm/irqflags.h>
6933 +#include <asm/pgtable.h>
6934 #include <linux/linkage.h>
6935 +#include <asm/alternative-asm.h>
6936
6937 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6938 #include <linux/elf-em.h>
6939 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6940 ENDPROC(native_irq_enable_sysexit)
6941 #endif
6942
6943 + .macro pax_enter_kernel_user
6944 + pax_set_fptr_mask
6945 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6946 + call pax_enter_kernel_user
6947 +#endif
6948 + .endm
6949 +
6950 + .macro pax_exit_kernel_user
6951 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6952 + call pax_exit_kernel_user
6953 +#endif
6954 +#ifdef CONFIG_PAX_RANDKSTACK
6955 + pushq %rax
6956 + pushq %r11
6957 + call pax_randomize_kstack
6958 + popq %r11
6959 + popq %rax
6960 +#endif
6961 + .endm
6962 +
6963 +.macro pax_erase_kstack
6964 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6965 + call pax_erase_kstack
6966 +#endif
6967 +.endm
6968 +
6969 /*
6970 * 32bit SYSENTER instruction entry.
6971 *
6972 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6973 CFI_REGISTER rsp,rbp
6974 SWAPGS_UNSAFE_STACK
6975 movq PER_CPU_VAR(kernel_stack), %rsp
6976 - addq $(KERNEL_STACK_OFFSET),%rsp
6977 - /*
6978 - * No need to follow this irqs on/off section: the syscall
6979 - * disabled irqs, here we enable it straight after entry:
6980 - */
6981 - ENABLE_INTERRUPTS(CLBR_NONE)
6982 movl %ebp,%ebp /* zero extension */
6983 pushq_cfi $__USER32_DS
6984 /*CFI_REL_OFFSET ss,0*/
6985 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6986 CFI_REL_OFFSET rsp,0
6987 pushfq_cfi
6988 /*CFI_REL_OFFSET rflags,0*/
6989 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6990 - CFI_REGISTER rip,r10
6991 + orl $X86_EFLAGS_IF,(%rsp)
6992 + GET_THREAD_INFO(%r11)
6993 + movl TI_sysenter_return(%r11), %r11d
6994 + CFI_REGISTER rip,r11
6995 pushq_cfi $__USER32_CS
6996 /*CFI_REL_OFFSET cs,0*/
6997 movl %eax, %eax
6998 - pushq_cfi %r10
6999 + pushq_cfi %r11
7000 CFI_REL_OFFSET rip,0
7001 pushq_cfi %rax
7002 cld
7003 SAVE_ARGS 0,1,0
7004 + pax_enter_kernel_user
7005 + /*
7006 + * No need to follow this irqs on/off section: the syscall
7007 + * disabled irqs, here we enable it straight after entry:
7008 + */
7009 + ENABLE_INTERRUPTS(CLBR_NONE)
7010 /* no need to do an access_ok check here because rbp has been
7011 32bit zero extended */
7012 +
7013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7014 + mov $PAX_USER_SHADOW_BASE,%r11
7015 + add %r11,%rbp
7016 +#endif
7017 +
7018 1: movl (%rbp),%ebp
7019 .section __ex_table,"a"
7020 .quad 1b,ia32_badarg
7021 .previous
7022 - GET_THREAD_INFO(%r10)
7023 - orl $TS_COMPAT,TI_status(%r10)
7024 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7025 + GET_THREAD_INFO(%r11)
7026 + orl $TS_COMPAT,TI_status(%r11)
7027 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7028 CFI_REMEMBER_STATE
7029 jnz sysenter_tracesys
7030 cmpq $(IA32_NR_syscalls-1),%rax
7031 @@ -162,13 +198,15 @@ sysenter_do_call:
7032 sysenter_dispatch:
7033 call *ia32_sys_call_table(,%rax,8)
7034 movq %rax,RAX-ARGOFFSET(%rsp)
7035 - GET_THREAD_INFO(%r10)
7036 + GET_THREAD_INFO(%r11)
7037 DISABLE_INTERRUPTS(CLBR_NONE)
7038 TRACE_IRQS_OFF
7039 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7040 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7041 jnz sysexit_audit
7042 sysexit_from_sys_call:
7043 - andl $~TS_COMPAT,TI_status(%r10)
7044 + pax_exit_kernel_user
7045 + pax_erase_kstack
7046 + andl $~TS_COMPAT,TI_status(%r11)
7047 /* clear IF, that popfq doesn't enable interrupts early */
7048 andl $~0x200,EFLAGS-R11(%rsp)
7049 movl RIP-R11(%rsp),%edx /* User %eip */
7050 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
7051 movl %eax,%esi /* 2nd arg: syscall number */
7052 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7053 call audit_syscall_entry
7054 +
7055 + pax_erase_kstack
7056 +
7057 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7058 cmpq $(IA32_NR_syscalls-1),%rax
7059 ja ia32_badsys
7060 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
7061 .endm
7062
7063 .macro auditsys_exit exit
7064 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7065 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7066 jnz ia32_ret_from_sys_call
7067 TRACE_IRQS_ON
7068 sti
7069 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
7070 movzbl %al,%edi /* zero-extend that into %edi */
7071 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7072 call audit_syscall_exit
7073 - GET_THREAD_INFO(%r10)
7074 + GET_THREAD_INFO(%r11)
7075 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7076 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7077 cli
7078 TRACE_IRQS_OFF
7079 - testl %edi,TI_flags(%r10)
7080 + testl %edi,TI_flags(%r11)
7081 jz \exit
7082 CLEAR_RREGS -ARGOFFSET
7083 jmp int_with_check
7084 @@ -238,7 +279,7 @@ sysexit_audit:
7085
7086 sysenter_tracesys:
7087 #ifdef CONFIG_AUDITSYSCALL
7088 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7089 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7090 jz sysenter_auditsys
7091 #endif
7092 SAVE_REST
7093 @@ -246,6 +287,9 @@ sysenter_tracesys:
7094 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
7095 movq %rsp,%rdi /* &pt_regs -> arg1 */
7096 call syscall_trace_enter
7097 +
7098 + pax_erase_kstack
7099 +
7100 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7101 RESTORE_REST
7102 cmpq $(IA32_NR_syscalls-1),%rax
7103 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
7104 ENTRY(ia32_cstar_target)
7105 CFI_STARTPROC32 simple
7106 CFI_SIGNAL_FRAME
7107 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
7108 + CFI_DEF_CFA rsp,0
7109 CFI_REGISTER rip,rcx
7110 /*CFI_REGISTER rflags,r11*/
7111 SWAPGS_UNSAFE_STACK
7112 movl %esp,%r8d
7113 CFI_REGISTER rsp,r8
7114 movq PER_CPU_VAR(kernel_stack),%rsp
7115 + SAVE_ARGS 8*6,0,0
7116 + pax_enter_kernel_user
7117 /*
7118 * No need to follow this irqs on/off section: the syscall
7119 * disabled irqs and here we enable it straight after entry:
7120 */
7121 ENABLE_INTERRUPTS(CLBR_NONE)
7122 - SAVE_ARGS 8,0,0
7123 movl %eax,%eax /* zero extension */
7124 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7125 movq %rcx,RIP-ARGOFFSET(%rsp)
7126 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
7127 /* no need to do an access_ok check here because r8 has been
7128 32bit zero extended */
7129 /* hardware stack frame is complete now */
7130 +
7131 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7132 + mov $PAX_USER_SHADOW_BASE,%r11
7133 + add %r11,%r8
7134 +#endif
7135 +
7136 1: movl (%r8),%r9d
7137 .section __ex_table,"a"
7138 .quad 1b,ia32_badarg
7139 .previous
7140 - GET_THREAD_INFO(%r10)
7141 - orl $TS_COMPAT,TI_status(%r10)
7142 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7143 + GET_THREAD_INFO(%r11)
7144 + orl $TS_COMPAT,TI_status(%r11)
7145 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7146 CFI_REMEMBER_STATE
7147 jnz cstar_tracesys
7148 cmpq $IA32_NR_syscalls-1,%rax
7149 @@ -321,13 +372,15 @@ cstar_do_call:
7150 cstar_dispatch:
7151 call *ia32_sys_call_table(,%rax,8)
7152 movq %rax,RAX-ARGOFFSET(%rsp)
7153 - GET_THREAD_INFO(%r10)
7154 + GET_THREAD_INFO(%r11)
7155 DISABLE_INTERRUPTS(CLBR_NONE)
7156 TRACE_IRQS_OFF
7157 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7158 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7159 jnz sysretl_audit
7160 sysretl_from_sys_call:
7161 - andl $~TS_COMPAT,TI_status(%r10)
7162 + pax_exit_kernel_user
7163 + pax_erase_kstack
7164 + andl $~TS_COMPAT,TI_status(%r11)
7165 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
7166 movl RIP-ARGOFFSET(%rsp),%ecx
7167 CFI_REGISTER rip,rcx
7168 @@ -355,7 +408,7 @@ sysretl_audit:
7169
7170 cstar_tracesys:
7171 #ifdef CONFIG_AUDITSYSCALL
7172 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7173 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7174 jz cstar_auditsys
7175 #endif
7176 xchgl %r9d,%ebp
7177 @@ -364,6 +417,9 @@ cstar_tracesys:
7178 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7179 movq %rsp,%rdi /* &pt_regs -> arg1 */
7180 call syscall_trace_enter
7181 +
7182 + pax_erase_kstack
7183 +
7184 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
7185 RESTORE_REST
7186 xchgl %ebp,%r9d
7187 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
7188 CFI_REL_OFFSET rip,RIP-RIP
7189 PARAVIRT_ADJUST_EXCEPTION_FRAME
7190 SWAPGS
7191 - /*
7192 - * No need to follow this irqs on/off section: the syscall
7193 - * disabled irqs and here we enable it straight after entry:
7194 - */
7195 - ENABLE_INTERRUPTS(CLBR_NONE)
7196 movl %eax,%eax
7197 pushq_cfi %rax
7198 cld
7199 /* note the registers are not zero extended to the sf.
7200 this could be a problem. */
7201 SAVE_ARGS 0,1,0
7202 - GET_THREAD_INFO(%r10)
7203 - orl $TS_COMPAT,TI_status(%r10)
7204 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7205 + pax_enter_kernel_user
7206 + /*
7207 + * No need to follow this irqs on/off section: the syscall
7208 + * disabled irqs and here we enable it straight after entry:
7209 + */
7210 + ENABLE_INTERRUPTS(CLBR_NONE)
7211 + GET_THREAD_INFO(%r11)
7212 + orl $TS_COMPAT,TI_status(%r11)
7213 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7214 jnz ia32_tracesys
7215 cmpq $(IA32_NR_syscalls-1),%rax
7216 ja ia32_badsys
7217 @@ -441,6 +498,9 @@ ia32_tracesys:
7218 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7219 movq %rsp,%rdi /* &pt_regs -> arg1 */
7220 call syscall_trace_enter
7221 +
7222 + pax_erase_kstack
7223 +
7224 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7225 RESTORE_REST
7226 cmpq $(IA32_NR_syscalls-1),%rax
7227 @@ -455,6 +515,7 @@ ia32_badsys:
7228
7229 quiet_ni_syscall:
7230 movq $-ENOSYS,%rax
7231 + pax_force_retaddr
7232 ret
7233 CFI_ENDPROC
7234
7235 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
7236 index f6f5c53..b358b28 100644
7237 --- a/arch/x86/ia32/sys_ia32.c
7238 +++ b/arch/x86/ia32/sys_ia32.c
7239 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
7240 */
7241 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
7242 {
7243 - typeof(ubuf->st_uid) uid = 0;
7244 - typeof(ubuf->st_gid) gid = 0;
7245 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7246 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7247 SET_UID(uid, stat->uid);
7248 SET_GID(gid, stat->gid);
7249 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7250 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7251 }
7252 set_fs(KERNEL_DS);
7253 ret = sys_rt_sigprocmask(how,
7254 - set ? (sigset_t __user *)&s : NULL,
7255 - oset ? (sigset_t __user *)&s : NULL,
7256 + set ? (sigset_t __force_user *)&s : NULL,
7257 + oset ? (sigset_t __force_user *)&s : NULL,
7258 sigsetsize);
7259 set_fs(old_fs);
7260 if (ret)
7261 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7262 return alarm_setitimer(seconds);
7263 }
7264
7265 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7266 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7267 int options)
7268 {
7269 return compat_sys_wait4(pid, stat_addr, options, NULL);
7270 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7271 mm_segment_t old_fs = get_fs();
7272
7273 set_fs(KERNEL_DS);
7274 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7275 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7276 set_fs(old_fs);
7277 if (put_compat_timespec(&t, interval))
7278 return -EFAULT;
7279 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7280 mm_segment_t old_fs = get_fs();
7281
7282 set_fs(KERNEL_DS);
7283 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7284 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7285 set_fs(old_fs);
7286 if (!ret) {
7287 switch (_NSIG_WORDS) {
7288 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7289 if (copy_siginfo_from_user32(&info, uinfo))
7290 return -EFAULT;
7291 set_fs(KERNEL_DS);
7292 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7293 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7294 set_fs(old_fs);
7295 return ret;
7296 }
7297 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7298 return -EFAULT;
7299
7300 set_fs(KERNEL_DS);
7301 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7302 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7303 count);
7304 set_fs(old_fs);
7305
7306 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7307 index 091508b..7692c6f 100644
7308 --- a/arch/x86/include/asm/alternative-asm.h
7309 +++ b/arch/x86/include/asm/alternative-asm.h
7310 @@ -4,10 +4,10 @@
7311
7312 #ifdef CONFIG_SMP
7313 .macro LOCK_PREFIX
7314 -1: lock
7315 +672: lock
7316 .section .smp_locks,"a"
7317 .balign 4
7318 - .long 1b - .
7319 + .long 672b - .
7320 .previous
7321 .endm
7322 #else
7323 @@ -15,6 +15,45 @@
7324 .endm
7325 #endif
7326
7327 +#ifdef KERNEXEC_PLUGIN
7328 + .macro pax_force_retaddr_bts rip=0
7329 + btsq $63,\rip(%rsp)
7330 + .endm
7331 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7332 + .macro pax_force_retaddr rip=0, reload=0
7333 + btsq $63,\rip(%rsp)
7334 + .endm
7335 + .macro pax_force_fptr ptr
7336 + btsq $63,\ptr
7337 + .endm
7338 + .macro pax_set_fptr_mask
7339 + .endm
7340 +#endif
7341 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7342 + .macro pax_force_retaddr rip=0, reload=0
7343 + .if \reload
7344 + pax_set_fptr_mask
7345 + .endif
7346 + orq %r10,\rip(%rsp)
7347 + .endm
7348 + .macro pax_force_fptr ptr
7349 + orq %r10,\ptr
7350 + .endm
7351 + .macro pax_set_fptr_mask
7352 + movabs $0x8000000000000000,%r10
7353 + .endm
7354 +#endif
7355 +#else
7356 + .macro pax_force_retaddr rip=0, reload=0
7357 + .endm
7358 + .macro pax_force_fptr ptr
7359 + .endm
7360 + .macro pax_force_retaddr_bts rip=0
7361 + .endm
7362 + .macro pax_set_fptr_mask
7363 + .endm
7364 +#endif
7365 +
7366 .macro altinstruction_entry orig alt feature orig_len alt_len
7367 .long \orig - .
7368 .long \alt - .
7369 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7370 index 37ad100..7d47faa 100644
7371 --- a/arch/x86/include/asm/alternative.h
7372 +++ b/arch/x86/include/asm/alternative.h
7373 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7374 ".section .discard,\"aw\",@progbits\n" \
7375 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7376 ".previous\n" \
7377 - ".section .altinstr_replacement, \"ax\"\n" \
7378 + ".section .altinstr_replacement, \"a\"\n" \
7379 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7380 ".previous"
7381
7382 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7383 index 1a6c09a..fec2432 100644
7384 --- a/arch/x86/include/asm/apic.h
7385 +++ b/arch/x86/include/asm/apic.h
7386 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7387
7388 #ifdef CONFIG_X86_LOCAL_APIC
7389
7390 -extern unsigned int apic_verbosity;
7391 +extern int apic_verbosity;
7392 extern int local_apic_timer_c2_ok;
7393
7394 extern int disable_apic;
7395 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7396 index 20370c6..a2eb9b0 100644
7397 --- a/arch/x86/include/asm/apm.h
7398 +++ b/arch/x86/include/asm/apm.h
7399 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7400 __asm__ __volatile__(APM_DO_ZERO_SEGS
7401 "pushl %%edi\n\t"
7402 "pushl %%ebp\n\t"
7403 - "lcall *%%cs:apm_bios_entry\n\t"
7404 + "lcall *%%ss:apm_bios_entry\n\t"
7405 "setc %%al\n\t"
7406 "popl %%ebp\n\t"
7407 "popl %%edi\n\t"
7408 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7409 __asm__ __volatile__(APM_DO_ZERO_SEGS
7410 "pushl %%edi\n\t"
7411 "pushl %%ebp\n\t"
7412 - "lcall *%%cs:apm_bios_entry\n\t"
7413 + "lcall *%%ss:apm_bios_entry\n\t"
7414 "setc %%bl\n\t"
7415 "popl %%ebp\n\t"
7416 "popl %%edi\n\t"
7417 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7418 index 58cb6d4..ca9010d 100644
7419 --- a/arch/x86/include/asm/atomic.h
7420 +++ b/arch/x86/include/asm/atomic.h
7421 @@ -22,7 +22,18 @@
7422 */
7423 static inline int atomic_read(const atomic_t *v)
7424 {
7425 - return (*(volatile int *)&(v)->counter);
7426 + return (*(volatile const int *)&(v)->counter);
7427 +}
7428 +
7429 +/**
7430 + * atomic_read_unchecked - read atomic variable
7431 + * @v: pointer of type atomic_unchecked_t
7432 + *
7433 + * Atomically reads the value of @v.
7434 + */
7435 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7436 +{
7437 + return (*(volatile const int *)&(v)->counter);
7438 }
7439
7440 /**
7441 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7442 }
7443
7444 /**
7445 + * atomic_set_unchecked - set atomic variable
7446 + * @v: pointer of type atomic_unchecked_t
7447 + * @i: required value
7448 + *
7449 + * Atomically sets the value of @v to @i.
7450 + */
7451 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7452 +{
7453 + v->counter = i;
7454 +}
7455 +
7456 +/**
7457 * atomic_add - add integer to atomic variable
7458 * @i: integer value to add
7459 * @v: pointer of type atomic_t
7460 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7461 */
7462 static inline void atomic_add(int i, atomic_t *v)
7463 {
7464 - asm volatile(LOCK_PREFIX "addl %1,%0"
7465 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7466 +
7467 +#ifdef CONFIG_PAX_REFCOUNT
7468 + "jno 0f\n"
7469 + LOCK_PREFIX "subl %1,%0\n"
7470 + "int $4\n0:\n"
7471 + _ASM_EXTABLE(0b, 0b)
7472 +#endif
7473 +
7474 + : "+m" (v->counter)
7475 + : "ir" (i));
7476 +}
7477 +
7478 +/**
7479 + * atomic_add_unchecked - add integer to atomic variable
7480 + * @i: integer value to add
7481 + * @v: pointer of type atomic_unchecked_t
7482 + *
7483 + * Atomically adds @i to @v.
7484 + */
7485 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7486 +{
7487 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7488 : "+m" (v->counter)
7489 : "ir" (i));
7490 }
7491 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7492 */
7493 static inline void atomic_sub(int i, atomic_t *v)
7494 {
7495 - asm volatile(LOCK_PREFIX "subl %1,%0"
7496 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7497 +
7498 +#ifdef CONFIG_PAX_REFCOUNT
7499 + "jno 0f\n"
7500 + LOCK_PREFIX "addl %1,%0\n"
7501 + "int $4\n0:\n"
7502 + _ASM_EXTABLE(0b, 0b)
7503 +#endif
7504 +
7505 + : "+m" (v->counter)
7506 + : "ir" (i));
7507 +}
7508 +
7509 +/**
7510 + * atomic_sub_unchecked - subtract integer from atomic variable
7511 + * @i: integer value to subtract
7512 + * @v: pointer of type atomic_unchecked_t
7513 + *
7514 + * Atomically subtracts @i from @v.
7515 + */
7516 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7517 +{
7518 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7519 : "+m" (v->counter)
7520 : "ir" (i));
7521 }
7522 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7523 {
7524 unsigned char c;
7525
7526 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7527 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7528 +
7529 +#ifdef CONFIG_PAX_REFCOUNT
7530 + "jno 0f\n"
7531 + LOCK_PREFIX "addl %2,%0\n"
7532 + "int $4\n0:\n"
7533 + _ASM_EXTABLE(0b, 0b)
7534 +#endif
7535 +
7536 + "sete %1\n"
7537 : "+m" (v->counter), "=qm" (c)
7538 : "ir" (i) : "memory");
7539 return c;
7540 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7541 */
7542 static inline void atomic_inc(atomic_t *v)
7543 {
7544 - asm volatile(LOCK_PREFIX "incl %0"
7545 + asm volatile(LOCK_PREFIX "incl %0\n"
7546 +
7547 +#ifdef CONFIG_PAX_REFCOUNT
7548 + "jno 0f\n"
7549 + LOCK_PREFIX "decl %0\n"
7550 + "int $4\n0:\n"
7551 + _ASM_EXTABLE(0b, 0b)
7552 +#endif
7553 +
7554 + : "+m" (v->counter));
7555 +}
7556 +
7557 +/**
7558 + * atomic_inc_unchecked - increment atomic variable
7559 + * @v: pointer of type atomic_unchecked_t
7560 + *
7561 + * Atomically increments @v by 1.
7562 + */
7563 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7564 +{
7565 + asm volatile(LOCK_PREFIX "incl %0\n"
7566 : "+m" (v->counter));
7567 }
7568
7569 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7570 */
7571 static inline void atomic_dec(atomic_t *v)
7572 {
7573 - asm volatile(LOCK_PREFIX "decl %0"
7574 + asm volatile(LOCK_PREFIX "decl %0\n"
7575 +
7576 +#ifdef CONFIG_PAX_REFCOUNT
7577 + "jno 0f\n"
7578 + LOCK_PREFIX "incl %0\n"
7579 + "int $4\n0:\n"
7580 + _ASM_EXTABLE(0b, 0b)
7581 +#endif
7582 +
7583 + : "+m" (v->counter));
7584 +}
7585 +
7586 +/**
7587 + * atomic_dec_unchecked - decrement atomic variable
7588 + * @v: pointer of type atomic_unchecked_t
7589 + *
7590 + * Atomically decrements @v by 1.
7591 + */
7592 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7593 +{
7594 + asm volatile(LOCK_PREFIX "decl %0\n"
7595 : "+m" (v->counter));
7596 }
7597
7598 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7599 {
7600 unsigned char c;
7601
7602 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7603 + asm volatile(LOCK_PREFIX "decl %0\n"
7604 +
7605 +#ifdef CONFIG_PAX_REFCOUNT
7606 + "jno 0f\n"
7607 + LOCK_PREFIX "incl %0\n"
7608 + "int $4\n0:\n"
7609 + _ASM_EXTABLE(0b, 0b)
7610 +#endif
7611 +
7612 + "sete %1\n"
7613 : "+m" (v->counter), "=qm" (c)
7614 : : "memory");
7615 return c != 0;
7616 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7617 {
7618 unsigned char c;
7619
7620 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7621 + asm volatile(LOCK_PREFIX "incl %0\n"
7622 +
7623 +#ifdef CONFIG_PAX_REFCOUNT
7624 + "jno 0f\n"
7625 + LOCK_PREFIX "decl %0\n"
7626 + "int $4\n0:\n"
7627 + _ASM_EXTABLE(0b, 0b)
7628 +#endif
7629 +
7630 + "sete %1\n"
7631 + : "+m" (v->counter), "=qm" (c)
7632 + : : "memory");
7633 + return c != 0;
7634 +}
7635 +
7636 +/**
7637 + * atomic_inc_and_test_unchecked - increment and test
7638 + * @v: pointer of type atomic_unchecked_t
7639 + *
7640 + * Atomically increments @v by 1
7641 + * and returns true if the result is zero, or false for all
7642 + * other cases.
7643 + */
7644 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7645 +{
7646 + unsigned char c;
7647 +
7648 + asm volatile(LOCK_PREFIX "incl %0\n"
7649 + "sete %1\n"
7650 : "+m" (v->counter), "=qm" (c)
7651 : : "memory");
7652 return c != 0;
7653 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7654 {
7655 unsigned char c;
7656
7657 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7658 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7659 +
7660 +#ifdef CONFIG_PAX_REFCOUNT
7661 + "jno 0f\n"
7662 + LOCK_PREFIX "subl %2,%0\n"
7663 + "int $4\n0:\n"
7664 + _ASM_EXTABLE(0b, 0b)
7665 +#endif
7666 +
7667 + "sets %1\n"
7668 : "+m" (v->counter), "=qm" (c)
7669 : "ir" (i) : "memory");
7670 return c;
7671 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7672 goto no_xadd;
7673 #endif
7674 /* Modern 486+ processor */
7675 - return i + xadd(&v->counter, i);
7676 + return i + xadd_check_overflow(&v->counter, i);
7677
7678 #ifdef CONFIG_M386
7679 no_xadd: /* Legacy 386 processor */
7680 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7681 }
7682
7683 /**
7684 + * atomic_add_return_unchecked - add integer and return
7685 + * @i: integer value to add
7686 + * @v: pointer of type atomic_unchecked_t
7687 + *
7688 + * Atomically adds @i to @v and returns @i + @v
7689 + */
7690 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7691 +{
7692 +#ifdef CONFIG_M386
7693 + int __i;
7694 + unsigned long flags;
7695 + if (unlikely(boot_cpu_data.x86 <= 3))
7696 + goto no_xadd;
7697 +#endif
7698 + /* Modern 486+ processor */
7699 + return i + xadd(&v->counter, i);
7700 +
7701 +#ifdef CONFIG_M386
7702 +no_xadd: /* Legacy 386 processor */
7703 + raw_local_irq_save(flags);
7704 + __i = atomic_read_unchecked(v);
7705 + atomic_set_unchecked(v, i + __i);
7706 + raw_local_irq_restore(flags);
7707 + return i + __i;
7708 +#endif
7709 +}
7710 +
7711 +/**
7712 * atomic_sub_return - subtract integer and return
7713 * @v: pointer of type atomic_t
7714 * @i: integer value to subtract
7715 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7716 }
7717
7718 #define atomic_inc_return(v) (atomic_add_return(1, v))
7719 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7720 +{
7721 + return atomic_add_return_unchecked(1, v);
7722 +}
7723 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7724
7725 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7726 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7727 return cmpxchg(&v->counter, old, new);
7728 }
7729
7730 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7731 +{
7732 + return cmpxchg(&v->counter, old, new);
7733 +}
7734 +
7735 static inline int atomic_xchg(atomic_t *v, int new)
7736 {
7737 return xchg(&v->counter, new);
7738 }
7739
7740 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7741 +{
7742 + return xchg(&v->counter, new);
7743 +}
7744 +
7745 /**
7746 * __atomic_add_unless - add unless the number is already a given value
7747 * @v: pointer of type atomic_t
7748 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7749 */
7750 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7751 {
7752 - int c, old;
7753 + int c, old, new;
7754 c = atomic_read(v);
7755 for (;;) {
7756 - if (unlikely(c == (u)))
7757 + if (unlikely(c == u))
7758 break;
7759 - old = atomic_cmpxchg((v), c, c + (a));
7760 +
7761 + asm volatile("addl %2,%0\n"
7762 +
7763 +#ifdef CONFIG_PAX_REFCOUNT
7764 + "jno 0f\n"
7765 + "subl %2,%0\n"
7766 + "int $4\n0:\n"
7767 + _ASM_EXTABLE(0b, 0b)
7768 +#endif
7769 +
7770 + : "=r" (new)
7771 + : "0" (c), "ir" (a));
7772 +
7773 + old = atomic_cmpxchg(v, c, new);
7774 if (likely(old == c))
7775 break;
7776 c = old;
7777 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7778 return c;
7779 }
7780
7781 +/**
7782 + * atomic_inc_not_zero_hint - increment if not null
7783 + * @v: pointer of type atomic_t
7784 + * @hint: probable value of the atomic before the increment
7785 + *
7786 + * This version of atomic_inc_not_zero() gives a hint of probable
7787 + * value of the atomic. This helps processor to not read the memory
7788 + * before doing the atomic read/modify/write cycle, lowering
7789 + * number of bus transactions on some arches.
7790 + *
7791 + * Returns: 0 if increment was not done, 1 otherwise.
7792 + */
7793 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7794 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7795 +{
7796 + int val, c = hint, new;
7797 +
7798 + /* sanity test, should be removed by compiler if hint is a constant */
7799 + if (!hint)
7800 + return __atomic_add_unless(v, 1, 0);
7801 +
7802 + do {
7803 + asm volatile("incl %0\n"
7804 +
7805 +#ifdef CONFIG_PAX_REFCOUNT
7806 + "jno 0f\n"
7807 + "decl %0\n"
7808 + "int $4\n0:\n"
7809 + _ASM_EXTABLE(0b, 0b)
7810 +#endif
7811 +
7812 + : "=r" (new)
7813 + : "0" (c));
7814 +
7815 + val = atomic_cmpxchg(v, c, new);
7816 + if (val == c)
7817 + return 1;
7818 + c = val;
7819 + } while (c);
7820 +
7821 + return 0;
7822 +}
7823
7824 /*
7825 * atomic_dec_if_positive - decrement by 1 if old value positive
7826 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7827 index 24098aa..1e37723 100644
7828 --- a/arch/x86/include/asm/atomic64_32.h
7829 +++ b/arch/x86/include/asm/atomic64_32.h
7830 @@ -12,6 +12,14 @@ typedef struct {
7831 u64 __aligned(8) counter;
7832 } atomic64_t;
7833
7834 +#ifdef CONFIG_PAX_REFCOUNT
7835 +typedef struct {
7836 + u64 __aligned(8) counter;
7837 +} atomic64_unchecked_t;
7838 +#else
7839 +typedef atomic64_t atomic64_unchecked_t;
7840 +#endif
7841 +
7842 #define ATOMIC64_INIT(val) { (val) }
7843
7844 #ifdef CONFIG_X86_CMPXCHG64
7845 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7846 }
7847
7848 /**
7849 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7850 + * @p: pointer to type atomic64_unchecked_t
7851 + * @o: expected value
7852 + * @n: new value
7853 + *
7854 + * Atomically sets @v to @n if it was equal to @o and returns
7855 + * the old value.
7856 + */
7857 +
7858 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7859 +{
7860 + return cmpxchg64(&v->counter, o, n);
7861 +}
7862 +
7863 +/**
7864 * atomic64_xchg - xchg atomic64 variable
7865 * @v: pointer to type atomic64_t
7866 * @n: value to assign
7867 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7868 }
7869
7870 /**
7871 + * atomic64_set_unchecked - set atomic64 variable
7872 + * @v: pointer to type atomic64_unchecked_t
7873 + * @n: value to assign
7874 + *
7875 + * Atomically sets the value of @v to @n.
7876 + */
7877 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7878 +{
7879 + unsigned high = (unsigned)(i >> 32);
7880 + unsigned low = (unsigned)i;
7881 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7882 + : "+b" (low), "+c" (high)
7883 + : "S" (v)
7884 + : "eax", "edx", "memory"
7885 + );
7886 +}
7887 +
7888 +/**
7889 * atomic64_read - read atomic64 variable
7890 * @v: pointer to type atomic64_t
7891 *
7892 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7893 }
7894
7895 /**
7896 + * atomic64_read_unchecked - read atomic64 variable
7897 + * @v: pointer to type atomic64_unchecked_t
7898 + *
7899 + * Atomically reads the value of @v and returns it.
7900 + */
7901 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7902 +{
7903 + long long r;
7904 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7905 + : "=A" (r), "+c" (v)
7906 + : : "memory"
7907 + );
7908 + return r;
7909 + }
7910 +
7911 +/**
7912 * atomic64_add_return - add and return
7913 * @i: integer value to add
7914 * @v: pointer to type atomic64_t
7915 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7916 return i;
7917 }
7918
7919 +/**
7920 + * atomic64_add_return_unchecked - add and return
7921 + * @i: integer value to add
7922 + * @v: pointer to type atomic64_unchecked_t
7923 + *
7924 + * Atomically adds @i to @v and returns @i + *@v
7925 + */
7926 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7927 +{
7928 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7929 + : "+A" (i), "+c" (v)
7930 + : : "memory"
7931 + );
7932 + return i;
7933 +}
7934 +
7935 /*
7936 * Other variants with different arithmetic operators:
7937 */
7938 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7939 return a;
7940 }
7941
7942 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7943 +{
7944 + long long a;
7945 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7946 + : "=A" (a)
7947 + : "S" (v)
7948 + : "memory", "ecx"
7949 + );
7950 + return a;
7951 +}
7952 +
7953 static inline long long atomic64_dec_return(atomic64_t *v)
7954 {
7955 long long a;
7956 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7957 }
7958
7959 /**
7960 + * atomic64_add_unchecked - add integer to atomic64 variable
7961 + * @i: integer value to add
7962 + * @v: pointer to type atomic64_unchecked_t
7963 + *
7964 + * Atomically adds @i to @v.
7965 + */
7966 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7967 +{
7968 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7969 + : "+A" (i), "+c" (v)
7970 + : : "memory"
7971 + );
7972 + return i;
7973 +}
7974 +
7975 +/**
7976 * atomic64_sub - subtract the atomic64 variable
7977 * @i: integer value to subtract
7978 * @v: pointer to type atomic64_t
7979 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7980 index 0e1cbfc..5623683 100644
7981 --- a/arch/x86/include/asm/atomic64_64.h
7982 +++ b/arch/x86/include/asm/atomic64_64.h
7983 @@ -18,7 +18,19 @@
7984 */
7985 static inline long atomic64_read(const atomic64_t *v)
7986 {
7987 - return (*(volatile long *)&(v)->counter);
7988 + return (*(volatile const long *)&(v)->counter);
7989 +}
7990 +
7991 +/**
7992 + * atomic64_read_unchecked - read atomic64 variable
7993 + * @v: pointer of type atomic64_unchecked_t
7994 + *
7995 + * Atomically reads the value of @v.
7996 + * Doesn't imply a read memory barrier.
7997 + */
7998 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7999 +{
8000 + return (*(volatile const long *)&(v)->counter);
8001 }
8002
8003 /**
8004 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
8005 }
8006
8007 /**
8008 + * atomic64_set_unchecked - set atomic64 variable
8009 + * @v: pointer to type atomic64_unchecked_t
8010 + * @i: required value
8011 + *
8012 + * Atomically sets the value of @v to @i.
8013 + */
8014 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8015 +{
8016 + v->counter = i;
8017 +}
8018 +
8019 +/**
8020 * atomic64_add - add integer to atomic64 variable
8021 * @i: integer value to add
8022 * @v: pointer to type atomic64_t
8023 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
8024 */
8025 static inline void atomic64_add(long i, atomic64_t *v)
8026 {
8027 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
8028 +
8029 +#ifdef CONFIG_PAX_REFCOUNT
8030 + "jno 0f\n"
8031 + LOCK_PREFIX "subq %1,%0\n"
8032 + "int $4\n0:\n"
8033 + _ASM_EXTABLE(0b, 0b)
8034 +#endif
8035 +
8036 + : "=m" (v->counter)
8037 + : "er" (i), "m" (v->counter));
8038 +}
8039 +
8040 +/**
8041 + * atomic64_add_unchecked - add integer to atomic64 variable
8042 + * @i: integer value to add
8043 + * @v: pointer to type atomic64_unchecked_t
8044 + *
8045 + * Atomically adds @i to @v.
8046 + */
8047 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
8048 +{
8049 asm volatile(LOCK_PREFIX "addq %1,%0"
8050 : "=m" (v->counter)
8051 : "er" (i), "m" (v->counter));
8052 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
8053 */
8054 static inline void atomic64_sub(long i, atomic64_t *v)
8055 {
8056 - asm volatile(LOCK_PREFIX "subq %1,%0"
8057 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8058 +
8059 +#ifdef CONFIG_PAX_REFCOUNT
8060 + "jno 0f\n"
8061 + LOCK_PREFIX "addq %1,%0\n"
8062 + "int $4\n0:\n"
8063 + _ASM_EXTABLE(0b, 0b)
8064 +#endif
8065 +
8066 + : "=m" (v->counter)
8067 + : "er" (i), "m" (v->counter));
8068 +}
8069 +
8070 +/**
8071 + * atomic64_sub_unchecked - subtract the atomic64 variable
8072 + * @i: integer value to subtract
8073 + * @v: pointer to type atomic64_unchecked_t
8074 + *
8075 + * Atomically subtracts @i from @v.
8076 + */
8077 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
8078 +{
8079 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8080 : "=m" (v->counter)
8081 : "er" (i), "m" (v->counter));
8082 }
8083 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8084 {
8085 unsigned char c;
8086
8087 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
8088 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
8089 +
8090 +#ifdef CONFIG_PAX_REFCOUNT
8091 + "jno 0f\n"
8092 + LOCK_PREFIX "addq %2,%0\n"
8093 + "int $4\n0:\n"
8094 + _ASM_EXTABLE(0b, 0b)
8095 +#endif
8096 +
8097 + "sete %1\n"
8098 : "=m" (v->counter), "=qm" (c)
8099 : "er" (i), "m" (v->counter) : "memory");
8100 return c;
8101 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8102 */
8103 static inline void atomic64_inc(atomic64_t *v)
8104 {
8105 + asm volatile(LOCK_PREFIX "incq %0\n"
8106 +
8107 +#ifdef CONFIG_PAX_REFCOUNT
8108 + "jno 0f\n"
8109 + LOCK_PREFIX "decq %0\n"
8110 + "int $4\n0:\n"
8111 + _ASM_EXTABLE(0b, 0b)
8112 +#endif
8113 +
8114 + : "=m" (v->counter)
8115 + : "m" (v->counter));
8116 +}
8117 +
8118 +/**
8119 + * atomic64_inc_unchecked - increment atomic64 variable
8120 + * @v: pointer to type atomic64_unchecked_t
8121 + *
8122 + * Atomically increments @v by 1.
8123 + */
8124 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8125 +{
8126 asm volatile(LOCK_PREFIX "incq %0"
8127 : "=m" (v->counter)
8128 : "m" (v->counter));
8129 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
8130 */
8131 static inline void atomic64_dec(atomic64_t *v)
8132 {
8133 - asm volatile(LOCK_PREFIX "decq %0"
8134 + asm volatile(LOCK_PREFIX "decq %0\n"
8135 +
8136 +#ifdef CONFIG_PAX_REFCOUNT
8137 + "jno 0f\n"
8138 + LOCK_PREFIX "incq %0\n"
8139 + "int $4\n0:\n"
8140 + _ASM_EXTABLE(0b, 0b)
8141 +#endif
8142 +
8143 + : "=m" (v->counter)
8144 + : "m" (v->counter));
8145 +}
8146 +
8147 +/**
8148 + * atomic64_dec_unchecked - decrement atomic64 variable
8149 + * @v: pointer to type atomic64_t
8150 + *
8151 + * Atomically decrements @v by 1.
8152 + */
8153 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8154 +{
8155 + asm volatile(LOCK_PREFIX "decq %0\n"
8156 : "=m" (v->counter)
8157 : "m" (v->counter));
8158 }
8159 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
8160 {
8161 unsigned char c;
8162
8163 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
8164 + asm volatile(LOCK_PREFIX "decq %0\n"
8165 +
8166 +#ifdef CONFIG_PAX_REFCOUNT
8167 + "jno 0f\n"
8168 + LOCK_PREFIX "incq %0\n"
8169 + "int $4\n0:\n"
8170 + _ASM_EXTABLE(0b, 0b)
8171 +#endif
8172 +
8173 + "sete %1\n"
8174 : "=m" (v->counter), "=qm" (c)
8175 : "m" (v->counter) : "memory");
8176 return c != 0;
8177 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
8178 {
8179 unsigned char c;
8180
8181 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
8182 + asm volatile(LOCK_PREFIX "incq %0\n"
8183 +
8184 +#ifdef CONFIG_PAX_REFCOUNT
8185 + "jno 0f\n"
8186 + LOCK_PREFIX "decq %0\n"
8187 + "int $4\n0:\n"
8188 + _ASM_EXTABLE(0b, 0b)
8189 +#endif
8190 +
8191 + "sete %1\n"
8192 : "=m" (v->counter), "=qm" (c)
8193 : "m" (v->counter) : "memory");
8194 return c != 0;
8195 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8196 {
8197 unsigned char c;
8198
8199 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8200 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
8201 +
8202 +#ifdef CONFIG_PAX_REFCOUNT
8203 + "jno 0f\n"
8204 + LOCK_PREFIX "subq %2,%0\n"
8205 + "int $4\n0:\n"
8206 + _ASM_EXTABLE(0b, 0b)
8207 +#endif
8208 +
8209 + "sets %1\n"
8210 : "=m" (v->counter), "=qm" (c)
8211 : "er" (i), "m" (v->counter) : "memory");
8212 return c;
8213 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8214 */
8215 static inline long atomic64_add_return(long i, atomic64_t *v)
8216 {
8217 + return i + xadd_check_overflow(&v->counter, i);
8218 +}
8219 +
8220 +/**
8221 + * atomic64_add_return_unchecked - add and return
8222 + * @i: integer value to add
8223 + * @v: pointer to type atomic64_unchecked_t
8224 + *
8225 + * Atomically adds @i to @v and returns @i + @v
8226 + */
8227 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8228 +{
8229 return i + xadd(&v->counter, i);
8230 }
8231
8232 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
8233 }
8234
8235 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8236 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8237 +{
8238 + return atomic64_add_return_unchecked(1, v);
8239 +}
8240 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8241
8242 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8243 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8244 return cmpxchg(&v->counter, old, new);
8245 }
8246
8247 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8248 +{
8249 + return cmpxchg(&v->counter, old, new);
8250 +}
8251 +
8252 static inline long atomic64_xchg(atomic64_t *v, long new)
8253 {
8254 return xchg(&v->counter, new);
8255 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8256 */
8257 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8258 {
8259 - long c, old;
8260 + long c, old, new;
8261 c = atomic64_read(v);
8262 for (;;) {
8263 - if (unlikely(c == (u)))
8264 + if (unlikely(c == u))
8265 break;
8266 - old = atomic64_cmpxchg((v), c, c + (a));
8267 +
8268 + asm volatile("add %2,%0\n"
8269 +
8270 +#ifdef CONFIG_PAX_REFCOUNT
8271 + "jno 0f\n"
8272 + "sub %2,%0\n"
8273 + "int $4\n0:\n"
8274 + _ASM_EXTABLE(0b, 0b)
8275 +#endif
8276 +
8277 + : "=r" (new)
8278 + : "0" (c), "ir" (a));
8279 +
8280 + old = atomic64_cmpxchg(v, c, new);
8281 if (likely(old == c))
8282 break;
8283 c = old;
8284 }
8285 - return c != (u);
8286 + return c != u;
8287 }
8288
8289 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8290 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8291 index 1775d6e..b65017f 100644
8292 --- a/arch/x86/include/asm/bitops.h
8293 +++ b/arch/x86/include/asm/bitops.h
8294 @@ -38,7 +38,7 @@
8295 * a mask operation on a byte.
8296 */
8297 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8298 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8299 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8300 #define CONST_MASK(nr) (1 << ((nr) & 7))
8301
8302 /**
8303 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8304 index 5e1a2ee..c9f9533 100644
8305 --- a/arch/x86/include/asm/boot.h
8306 +++ b/arch/x86/include/asm/boot.h
8307 @@ -11,10 +11,15 @@
8308 #include <asm/pgtable_types.h>
8309
8310 /* Physical address where kernel should be loaded. */
8311 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8312 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8313 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8314 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8315
8316 +#ifndef __ASSEMBLY__
8317 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8318 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8319 +#endif
8320 +
8321 /* Minimum kernel alignment, as a power of two */
8322 #ifdef CONFIG_X86_64
8323 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8324 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8325 index 48f99f1..d78ebf9 100644
8326 --- a/arch/x86/include/asm/cache.h
8327 +++ b/arch/x86/include/asm/cache.h
8328 @@ -5,12 +5,13 @@
8329
8330 /* L1 cache line size */
8331 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8332 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8333 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8334
8335 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8336 +#define __read_only __attribute__((__section__(".data..read_only")))
8337
8338 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8339 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8340 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8341
8342 #ifdef CONFIG_X86_VSMP
8343 #ifdef CONFIG_SMP
8344 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8345 index 4e12668..501d239 100644
8346 --- a/arch/x86/include/asm/cacheflush.h
8347 +++ b/arch/x86/include/asm/cacheflush.h
8348 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8349 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8350
8351 if (pg_flags == _PGMT_DEFAULT)
8352 - return -1;
8353 + return ~0UL;
8354 else if (pg_flags == _PGMT_WC)
8355 return _PAGE_CACHE_WC;
8356 else if (pg_flags == _PGMT_UC_MINUS)
8357 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8358 index 46fc474..b02b0f9 100644
8359 --- a/arch/x86/include/asm/checksum_32.h
8360 +++ b/arch/x86/include/asm/checksum_32.h
8361 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8362 int len, __wsum sum,
8363 int *src_err_ptr, int *dst_err_ptr);
8364
8365 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8366 + int len, __wsum sum,
8367 + int *src_err_ptr, int *dst_err_ptr);
8368 +
8369 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8370 + int len, __wsum sum,
8371 + int *src_err_ptr, int *dst_err_ptr);
8372 +
8373 /*
8374 * Note: when you get a NULL pointer exception here this means someone
8375 * passed in an incorrect kernel address to one of these functions.
8376 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8377 int *err_ptr)
8378 {
8379 might_sleep();
8380 - return csum_partial_copy_generic((__force void *)src, dst,
8381 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8382 len, sum, err_ptr, NULL);
8383 }
8384
8385 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8386 {
8387 might_sleep();
8388 if (access_ok(VERIFY_WRITE, dst, len))
8389 - return csum_partial_copy_generic(src, (__force void *)dst,
8390 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8391 len, sum, NULL, err_ptr);
8392
8393 if (len)
8394 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8395 index 5d3acdf..6447a02 100644
8396 --- a/arch/x86/include/asm/cmpxchg.h
8397 +++ b/arch/x86/include/asm/cmpxchg.h
8398 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8399 __compiletime_error("Bad argument size for cmpxchg");
8400 extern void __xadd_wrong_size(void)
8401 __compiletime_error("Bad argument size for xadd");
8402 +extern void __xadd_check_overflow_wrong_size(void)
8403 + __compiletime_error("Bad argument size for xadd_check_overflow");
8404
8405 /*
8406 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8407 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8408 __ret; \
8409 })
8410
8411 +#define __xadd_check_overflow(ptr, inc, lock) \
8412 + ({ \
8413 + __typeof__ (*(ptr)) __ret = (inc); \
8414 + switch (sizeof(*(ptr))) { \
8415 + case __X86_CASE_L: \
8416 + asm volatile (lock "xaddl %0, %1\n" \
8417 + "jno 0f\n" \
8418 + "mov %0,%1\n" \
8419 + "int $4\n0:\n" \
8420 + _ASM_EXTABLE(0b, 0b) \
8421 + : "+r" (__ret), "+m" (*(ptr)) \
8422 + : : "memory", "cc"); \
8423 + break; \
8424 + case __X86_CASE_Q: \
8425 + asm volatile (lock "xaddq %q0, %1\n" \
8426 + "jno 0f\n" \
8427 + "mov %0,%1\n" \
8428 + "int $4\n0:\n" \
8429 + _ASM_EXTABLE(0b, 0b) \
8430 + : "+r" (__ret), "+m" (*(ptr)) \
8431 + : : "memory", "cc"); \
8432 + break; \
8433 + default: \
8434 + __xadd_check_overflow_wrong_size(); \
8435 + } \
8436 + __ret; \
8437 + })
8438 +
8439 /*
8440 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8441 * value of "*ptr".
8442 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8443 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8444 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8445
8446 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8447 +
8448 #endif /* ASM_X86_CMPXCHG_H */
8449 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8450 index f3444f7..051a196 100644
8451 --- a/arch/x86/include/asm/cpufeature.h
8452 +++ b/arch/x86/include/asm/cpufeature.h
8453 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8454 ".section .discard,\"aw\",@progbits\n"
8455 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8456 ".previous\n"
8457 - ".section .altinstr_replacement,\"ax\"\n"
8458 + ".section .altinstr_replacement,\"a\"\n"
8459 "3: movb $1,%0\n"
8460 "4:\n"
8461 ".previous\n"
8462 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8463 index 41935fa..3b40db8 100644
8464 --- a/arch/x86/include/asm/desc.h
8465 +++ b/arch/x86/include/asm/desc.h
8466 @@ -4,6 +4,7 @@
8467 #include <asm/desc_defs.h>
8468 #include <asm/ldt.h>
8469 #include <asm/mmu.h>
8470 +#include <asm/pgtable.h>
8471
8472 #include <linux/smp.h>
8473
8474 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8475
8476 desc->type = (info->read_exec_only ^ 1) << 1;
8477 desc->type |= info->contents << 2;
8478 + desc->type |= info->seg_not_present ^ 1;
8479
8480 desc->s = 1;
8481 desc->dpl = 0x3;
8482 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8483 }
8484
8485 extern struct desc_ptr idt_descr;
8486 -extern gate_desc idt_table[];
8487 -
8488 -struct gdt_page {
8489 - struct desc_struct gdt[GDT_ENTRIES];
8490 -} __attribute__((aligned(PAGE_SIZE)));
8491 -
8492 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8493 +extern gate_desc idt_table[256];
8494
8495 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8496 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8497 {
8498 - return per_cpu(gdt_page, cpu).gdt;
8499 + return cpu_gdt_table[cpu];
8500 }
8501
8502 #ifdef CONFIG_X86_64
8503 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8504 unsigned long base, unsigned dpl, unsigned flags,
8505 unsigned short seg)
8506 {
8507 - gate->a = (seg << 16) | (base & 0xffff);
8508 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8509 + gate->gate.offset_low = base;
8510 + gate->gate.seg = seg;
8511 + gate->gate.reserved = 0;
8512 + gate->gate.type = type;
8513 + gate->gate.s = 0;
8514 + gate->gate.dpl = dpl;
8515 + gate->gate.p = 1;
8516 + gate->gate.offset_high = base >> 16;
8517 }
8518
8519 #endif
8520 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8521
8522 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8523 {
8524 + pax_open_kernel();
8525 memcpy(&idt[entry], gate, sizeof(*gate));
8526 + pax_close_kernel();
8527 }
8528
8529 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8530 {
8531 + pax_open_kernel();
8532 memcpy(&ldt[entry], desc, 8);
8533 + pax_close_kernel();
8534 }
8535
8536 static inline void
8537 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8538 default: size = sizeof(*gdt); break;
8539 }
8540
8541 + pax_open_kernel();
8542 memcpy(&gdt[entry], desc, size);
8543 + pax_close_kernel();
8544 }
8545
8546 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8547 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8548
8549 static inline void native_load_tr_desc(void)
8550 {
8551 + pax_open_kernel();
8552 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8553 + pax_close_kernel();
8554 }
8555
8556 static inline void native_load_gdt(const struct desc_ptr *dtr)
8557 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8558 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8559 unsigned int i;
8560
8561 + pax_open_kernel();
8562 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8563 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8564 + pax_close_kernel();
8565 }
8566
8567 #define _LDT_empty(info) \
8568 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8569 desc->limit = (limit >> 16) & 0xf;
8570 }
8571
8572 -static inline void _set_gate(int gate, unsigned type, void *addr,
8573 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8574 unsigned dpl, unsigned ist, unsigned seg)
8575 {
8576 gate_desc s;
8577 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8578 * Pentium F0 0F bugfix can have resulted in the mapped
8579 * IDT being write-protected.
8580 */
8581 -static inline void set_intr_gate(unsigned int n, void *addr)
8582 +static inline void set_intr_gate(unsigned int n, const void *addr)
8583 {
8584 BUG_ON((unsigned)n > 0xFF);
8585 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8586 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8587 /*
8588 * This routine sets up an interrupt gate at directory privilege level 3.
8589 */
8590 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8591 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8592 {
8593 BUG_ON((unsigned)n > 0xFF);
8594 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8595 }
8596
8597 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8598 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8599 {
8600 BUG_ON((unsigned)n > 0xFF);
8601 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8602 }
8603
8604 -static inline void set_trap_gate(unsigned int n, void *addr)
8605 +static inline void set_trap_gate(unsigned int n, const void *addr)
8606 {
8607 BUG_ON((unsigned)n > 0xFF);
8608 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8609 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8610 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8611 {
8612 BUG_ON((unsigned)n > 0xFF);
8613 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8614 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8615 }
8616
8617 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8618 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8619 {
8620 BUG_ON((unsigned)n > 0xFF);
8621 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8622 }
8623
8624 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8625 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8626 {
8627 BUG_ON((unsigned)n > 0xFF);
8628 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8629 }
8630
8631 +#ifdef CONFIG_X86_32
8632 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8633 +{
8634 + struct desc_struct d;
8635 +
8636 + if (likely(limit))
8637 + limit = (limit - 1UL) >> PAGE_SHIFT;
8638 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8639 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8640 +}
8641 +#endif
8642 +
8643 #endif /* _ASM_X86_DESC_H */
8644 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8645 index 278441f..b95a174 100644
8646 --- a/arch/x86/include/asm/desc_defs.h
8647 +++ b/arch/x86/include/asm/desc_defs.h
8648 @@ -31,6 +31,12 @@ struct desc_struct {
8649 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8650 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8651 };
8652 + struct {
8653 + u16 offset_low;
8654 + u16 seg;
8655 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8656 + unsigned offset_high: 16;
8657 + } gate;
8658 };
8659 } __attribute__((packed));
8660
8661 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8662 index 908b969..a1f4eb4 100644
8663 --- a/arch/x86/include/asm/e820.h
8664 +++ b/arch/x86/include/asm/e820.h
8665 @@ -69,7 +69,7 @@ struct e820map {
8666 #define ISA_START_ADDRESS 0xa0000
8667 #define ISA_END_ADDRESS 0x100000
8668
8669 -#define BIOS_BEGIN 0x000a0000
8670 +#define BIOS_BEGIN 0x000c0000
8671 #define BIOS_END 0x00100000
8672
8673 #define BIOS_ROM_BASE 0xffe00000
8674 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8675 index 5f962df..7289f09 100644
8676 --- a/arch/x86/include/asm/elf.h
8677 +++ b/arch/x86/include/asm/elf.h
8678 @@ -238,7 +238,25 @@ extern int force_personality32;
8679 the loader. We need to make sure that it is out of the way of the program
8680 that it will "exec", and that there is sufficient room for the brk. */
8681
8682 +#ifdef CONFIG_PAX_SEGMEXEC
8683 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8684 +#else
8685 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8686 +#endif
8687 +
8688 +#ifdef CONFIG_PAX_ASLR
8689 +#ifdef CONFIG_X86_32
8690 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8691 +
8692 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8693 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8694 +#else
8695 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8696 +
8697 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8698 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8699 +#endif
8700 +#endif
8701
8702 /* This yields a mask that user programs can use to figure out what
8703 instruction set this CPU supports. This could be done in user space,
8704 @@ -291,9 +309,7 @@ do { \
8705
8706 #define ARCH_DLINFO \
8707 do { \
8708 - if (vdso_enabled) \
8709 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8710 - (unsigned long)current->mm->context.vdso); \
8711 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8712 } while (0)
8713
8714 #define AT_SYSINFO 32
8715 @@ -304,7 +320,7 @@ do { \
8716
8717 #endif /* !CONFIG_X86_32 */
8718
8719 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8720 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8721
8722 #define VDSO_ENTRY \
8723 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8724 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8725 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8726 #define compat_arch_setup_additional_pages syscall32_setup_pages
8727
8728 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8729 -#define arch_randomize_brk arch_randomize_brk
8730 -
8731 /*
8732 * True on X86_32 or when emulating IA32 on X86_64
8733 */
8734 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8735 index cc70c1c..d96d011 100644
8736 --- a/arch/x86/include/asm/emergency-restart.h
8737 +++ b/arch/x86/include/asm/emergency-restart.h
8738 @@ -15,6 +15,6 @@ enum reboot_type {
8739
8740 extern enum reboot_type reboot_type;
8741
8742 -extern void machine_emergency_restart(void);
8743 +extern void machine_emergency_restart(void) __noreturn;
8744
8745 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8746 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8747 index d09bb03..4ea4194 100644
8748 --- a/arch/x86/include/asm/futex.h
8749 +++ b/arch/x86/include/asm/futex.h
8750 @@ -12,16 +12,18 @@
8751 #include <asm/system.h>
8752
8753 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8754 + typecheck(u32 __user *, uaddr); \
8755 asm volatile("1:\t" insn "\n" \
8756 "2:\t.section .fixup,\"ax\"\n" \
8757 "3:\tmov\t%3, %1\n" \
8758 "\tjmp\t2b\n" \
8759 "\t.previous\n" \
8760 _ASM_EXTABLE(1b, 3b) \
8761 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8762 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8763 : "i" (-EFAULT), "0" (oparg), "1" (0))
8764
8765 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8766 + typecheck(u32 __user *, uaddr); \
8767 asm volatile("1:\tmovl %2, %0\n" \
8768 "\tmovl\t%0, %3\n" \
8769 "\t" insn "\n" \
8770 @@ -34,7 +36,7 @@
8771 _ASM_EXTABLE(1b, 4b) \
8772 _ASM_EXTABLE(2b, 4b) \
8773 : "=&a" (oldval), "=&r" (ret), \
8774 - "+m" (*uaddr), "=&r" (tem) \
8775 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8776 : "r" (oparg), "i" (-EFAULT), "1" (0))
8777
8778 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8779 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8780
8781 switch (op) {
8782 case FUTEX_OP_SET:
8783 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8784 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8785 break;
8786 case FUTEX_OP_ADD:
8787 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8788 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8789 uaddr, oparg);
8790 break;
8791 case FUTEX_OP_OR:
8792 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8793 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8794 return -EFAULT;
8795
8796 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8797 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8798 "2:\t.section .fixup, \"ax\"\n"
8799 "3:\tmov %3, %0\n"
8800 "\tjmp 2b\n"
8801 "\t.previous\n"
8802 _ASM_EXTABLE(1b, 3b)
8803 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8804 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8805 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8806 : "memory"
8807 );
8808 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8809 index eb92a6e..b98b2f4 100644
8810 --- a/arch/x86/include/asm/hw_irq.h
8811 +++ b/arch/x86/include/asm/hw_irq.h
8812 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8813 extern void enable_IO_APIC(void);
8814
8815 /* Statistics */
8816 -extern atomic_t irq_err_count;
8817 -extern atomic_t irq_mis_count;
8818 +extern atomic_unchecked_t irq_err_count;
8819 +extern atomic_unchecked_t irq_mis_count;
8820
8821 /* EISA */
8822 extern void eisa_set_level_irq(unsigned int irq);
8823 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8824 index c9e09ea..73888df 100644
8825 --- a/arch/x86/include/asm/i387.h
8826 +++ b/arch/x86/include/asm/i387.h
8827 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8828 {
8829 int err;
8830
8831 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8832 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8833 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8834 +#endif
8835 +
8836 /* See comment in fxsave() below. */
8837 #ifdef CONFIG_AS_FXSAVEQ
8838 asm volatile("1: fxrstorq %[fx]\n\t"
8839 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8840 {
8841 int err;
8842
8843 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8844 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8845 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8846 +#endif
8847 +
8848 /*
8849 * Clear the bytes not touched by the fxsave and reserved
8850 * for the SW usage.
8851 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8852 #endif /* CONFIG_X86_64 */
8853
8854 /* We need a safe address that is cheap to find and that is already
8855 - in L1 during context switch. The best choices are unfortunately
8856 - different for UP and SMP */
8857 -#ifdef CONFIG_SMP
8858 -#define safe_address (__per_cpu_offset[0])
8859 -#else
8860 -#define safe_address (kstat_cpu(0).cpustat.user)
8861 -#endif
8862 + in L1 during context switch. */
8863 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8864
8865 /*
8866 * These must be called with preempt disabled
8867 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8868 struct thread_info *me = current_thread_info();
8869 preempt_disable();
8870 if (me->status & TS_USEDFPU)
8871 - __save_init_fpu(me->task);
8872 + __save_init_fpu(current);
8873 else
8874 clts();
8875 }
8876 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8877 index d8e8eef..99f81ae 100644
8878 --- a/arch/x86/include/asm/io.h
8879 +++ b/arch/x86/include/asm/io.h
8880 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8881
8882 #include <linux/vmalloc.h>
8883
8884 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8885 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8886 +{
8887 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8888 +}
8889 +
8890 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8891 +{
8892 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8893 +}
8894 +
8895 /*
8896 * Convert a virtual cached pointer to an uncached pointer
8897 */
8898 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8899 index bba3cf8..06bc8da 100644
8900 --- a/arch/x86/include/asm/irqflags.h
8901 +++ b/arch/x86/include/asm/irqflags.h
8902 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8903 sti; \
8904 sysexit
8905
8906 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8907 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8908 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8909 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8910 +
8911 #else
8912 #define INTERRUPT_RETURN iret
8913 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8914 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8915 index 5478825..839e88c 100644
8916 --- a/arch/x86/include/asm/kprobes.h
8917 +++ b/arch/x86/include/asm/kprobes.h
8918 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8919 #define RELATIVEJUMP_SIZE 5
8920 #define RELATIVECALL_OPCODE 0xe8
8921 #define RELATIVE_ADDR_SIZE 4
8922 -#define MAX_STACK_SIZE 64
8923 -#define MIN_STACK_SIZE(ADDR) \
8924 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8925 - THREAD_SIZE - (unsigned long)(ADDR))) \
8926 - ? (MAX_STACK_SIZE) \
8927 - : (((unsigned long)current_thread_info()) + \
8928 - THREAD_SIZE - (unsigned long)(ADDR)))
8929 +#define MAX_STACK_SIZE 64UL
8930 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8931
8932 #define flush_insn_slot(p) do { } while (0)
8933
8934 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8935 index b4973f4..7c4d3fc 100644
8936 --- a/arch/x86/include/asm/kvm_host.h
8937 +++ b/arch/x86/include/asm/kvm_host.h
8938 @@ -459,7 +459,7 @@ struct kvm_arch {
8939 unsigned int n_requested_mmu_pages;
8940 unsigned int n_max_mmu_pages;
8941 unsigned int indirect_shadow_pages;
8942 - atomic_t invlpg_counter;
8943 + atomic_unchecked_t invlpg_counter;
8944 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8945 /*
8946 * Hash table of struct kvm_mmu_page.
8947 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8948 int (*check_intercept)(struct kvm_vcpu *vcpu,
8949 struct x86_instruction_info *info,
8950 enum x86_intercept_stage stage);
8951 -};
8952 +} __do_const;
8953
8954 struct kvm_arch_async_pf {
8955 u32 token;
8956 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8957 index 9cdae5d..300d20f 100644
8958 --- a/arch/x86/include/asm/local.h
8959 +++ b/arch/x86/include/asm/local.h
8960 @@ -18,26 +18,58 @@ typedef struct {
8961
8962 static inline void local_inc(local_t *l)
8963 {
8964 - asm volatile(_ASM_INC "%0"
8965 + asm volatile(_ASM_INC "%0\n"
8966 +
8967 +#ifdef CONFIG_PAX_REFCOUNT
8968 + "jno 0f\n"
8969 + _ASM_DEC "%0\n"
8970 + "int $4\n0:\n"
8971 + _ASM_EXTABLE(0b, 0b)
8972 +#endif
8973 +
8974 : "+m" (l->a.counter));
8975 }
8976
8977 static inline void local_dec(local_t *l)
8978 {
8979 - asm volatile(_ASM_DEC "%0"
8980 + asm volatile(_ASM_DEC "%0\n"
8981 +
8982 +#ifdef CONFIG_PAX_REFCOUNT
8983 + "jno 0f\n"
8984 + _ASM_INC "%0\n"
8985 + "int $4\n0:\n"
8986 + _ASM_EXTABLE(0b, 0b)
8987 +#endif
8988 +
8989 : "+m" (l->a.counter));
8990 }
8991
8992 static inline void local_add(long i, local_t *l)
8993 {
8994 - asm volatile(_ASM_ADD "%1,%0"
8995 + asm volatile(_ASM_ADD "%1,%0\n"
8996 +
8997 +#ifdef CONFIG_PAX_REFCOUNT
8998 + "jno 0f\n"
8999 + _ASM_SUB "%1,%0\n"
9000 + "int $4\n0:\n"
9001 + _ASM_EXTABLE(0b, 0b)
9002 +#endif
9003 +
9004 : "+m" (l->a.counter)
9005 : "ir" (i));
9006 }
9007
9008 static inline void local_sub(long i, local_t *l)
9009 {
9010 - asm volatile(_ASM_SUB "%1,%0"
9011 + asm volatile(_ASM_SUB "%1,%0\n"
9012 +
9013 +#ifdef CONFIG_PAX_REFCOUNT
9014 + "jno 0f\n"
9015 + _ASM_ADD "%1,%0\n"
9016 + "int $4\n0:\n"
9017 + _ASM_EXTABLE(0b, 0b)
9018 +#endif
9019 +
9020 : "+m" (l->a.counter)
9021 : "ir" (i));
9022 }
9023 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
9024 {
9025 unsigned char c;
9026
9027 - asm volatile(_ASM_SUB "%2,%0; sete %1"
9028 + asm volatile(_ASM_SUB "%2,%0\n"
9029 +
9030 +#ifdef CONFIG_PAX_REFCOUNT
9031 + "jno 0f\n"
9032 + _ASM_ADD "%2,%0\n"
9033 + "int $4\n0:\n"
9034 + _ASM_EXTABLE(0b, 0b)
9035 +#endif
9036 +
9037 + "sete %1\n"
9038 : "+m" (l->a.counter), "=qm" (c)
9039 : "ir" (i) : "memory");
9040 return c;
9041 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
9042 {
9043 unsigned char c;
9044
9045 - asm volatile(_ASM_DEC "%0; sete %1"
9046 + asm volatile(_ASM_DEC "%0\n"
9047 +
9048 +#ifdef CONFIG_PAX_REFCOUNT
9049 + "jno 0f\n"
9050 + _ASM_INC "%0\n"
9051 + "int $4\n0:\n"
9052 + _ASM_EXTABLE(0b, 0b)
9053 +#endif
9054 +
9055 + "sete %1\n"
9056 : "+m" (l->a.counter), "=qm" (c)
9057 : : "memory");
9058 return c != 0;
9059 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
9060 {
9061 unsigned char c;
9062
9063 - asm volatile(_ASM_INC "%0; sete %1"
9064 + asm volatile(_ASM_INC "%0\n"
9065 +
9066 +#ifdef CONFIG_PAX_REFCOUNT
9067 + "jno 0f\n"
9068 + _ASM_DEC "%0\n"
9069 + "int $4\n0:\n"
9070 + _ASM_EXTABLE(0b, 0b)
9071 +#endif
9072 +
9073 + "sete %1\n"
9074 : "+m" (l->a.counter), "=qm" (c)
9075 : : "memory");
9076 return c != 0;
9077 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
9078 {
9079 unsigned char c;
9080
9081 - asm volatile(_ASM_ADD "%2,%0; sets %1"
9082 + asm volatile(_ASM_ADD "%2,%0\n"
9083 +
9084 +#ifdef CONFIG_PAX_REFCOUNT
9085 + "jno 0f\n"
9086 + _ASM_SUB "%2,%0\n"
9087 + "int $4\n0:\n"
9088 + _ASM_EXTABLE(0b, 0b)
9089 +#endif
9090 +
9091 + "sets %1\n"
9092 : "+m" (l->a.counter), "=qm" (c)
9093 : "ir" (i) : "memory");
9094 return c;
9095 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
9096 #endif
9097 /* Modern 486+ processor */
9098 __i = i;
9099 - asm volatile(_ASM_XADD "%0, %1;"
9100 + asm volatile(_ASM_XADD "%0, %1\n"
9101 +
9102 +#ifdef CONFIG_PAX_REFCOUNT
9103 + "jno 0f\n"
9104 + _ASM_MOV "%0,%1\n"
9105 + "int $4\n0:\n"
9106 + _ASM_EXTABLE(0b, 0b)
9107 +#endif
9108 +
9109 : "+r" (i), "+m" (l->a.counter)
9110 : : "memory");
9111 return i + __i;
9112 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
9113 index 593e51d..fa69c9a 100644
9114 --- a/arch/x86/include/asm/mman.h
9115 +++ b/arch/x86/include/asm/mman.h
9116 @@ -5,4 +5,14 @@
9117
9118 #include <asm-generic/mman.h>
9119
9120 +#ifdef __KERNEL__
9121 +#ifndef __ASSEMBLY__
9122 +#ifdef CONFIG_X86_32
9123 +#define arch_mmap_check i386_mmap_check
9124 +int i386_mmap_check(unsigned long addr, unsigned long len,
9125 + unsigned long flags);
9126 +#endif
9127 +#endif
9128 +#endif
9129 +
9130 #endif /* _ASM_X86_MMAN_H */
9131 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
9132 index 5f55e69..e20bfb1 100644
9133 --- a/arch/x86/include/asm/mmu.h
9134 +++ b/arch/x86/include/asm/mmu.h
9135 @@ -9,7 +9,7 @@
9136 * we put the segment information here.
9137 */
9138 typedef struct {
9139 - void *ldt;
9140 + struct desc_struct *ldt;
9141 int size;
9142
9143 #ifdef CONFIG_X86_64
9144 @@ -18,7 +18,19 @@ typedef struct {
9145 #endif
9146
9147 struct mutex lock;
9148 - void *vdso;
9149 + unsigned long vdso;
9150 +
9151 +#ifdef CONFIG_X86_32
9152 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9153 + unsigned long user_cs_base;
9154 + unsigned long user_cs_limit;
9155 +
9156 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9157 + cpumask_t cpu_user_cs_mask;
9158 +#endif
9159 +
9160 +#endif
9161 +#endif
9162 } mm_context_t;
9163
9164 #ifdef CONFIG_SMP
9165 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
9166 index 6902152..399f3a2 100644
9167 --- a/arch/x86/include/asm/mmu_context.h
9168 +++ b/arch/x86/include/asm/mmu_context.h
9169 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
9170
9171 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9172 {
9173 +
9174 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9175 + unsigned int i;
9176 + pgd_t *pgd;
9177 +
9178 + pax_open_kernel();
9179 + pgd = get_cpu_pgd(smp_processor_id());
9180 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9181 + set_pgd_batched(pgd+i, native_make_pgd(0));
9182 + pax_close_kernel();
9183 +#endif
9184 +
9185 #ifdef CONFIG_SMP
9186 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9187 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9188 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9189 struct task_struct *tsk)
9190 {
9191 unsigned cpu = smp_processor_id();
9192 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9193 + int tlbstate = TLBSTATE_OK;
9194 +#endif
9195
9196 if (likely(prev != next)) {
9197 #ifdef CONFIG_SMP
9198 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9199 + tlbstate = percpu_read(cpu_tlbstate.state);
9200 +#endif
9201 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9202 percpu_write(cpu_tlbstate.active_mm, next);
9203 #endif
9204 cpumask_set_cpu(cpu, mm_cpumask(next));
9205
9206 /* Re-load page tables */
9207 +#ifdef CONFIG_PAX_PER_CPU_PGD
9208 + pax_open_kernel();
9209 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9210 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9211 + pax_close_kernel();
9212 + load_cr3(get_cpu_pgd(cpu));
9213 +#else
9214 load_cr3(next->pgd);
9215 +#endif
9216
9217 /* stop flush ipis for the previous mm */
9218 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9219 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9220 */
9221 if (unlikely(prev->context.ldt != next->context.ldt))
9222 load_LDT_nolock(&next->context);
9223 - }
9224 +
9225 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9226 + if (!(__supported_pte_mask & _PAGE_NX)) {
9227 + smp_mb__before_clear_bit();
9228 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9229 + smp_mb__after_clear_bit();
9230 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9231 + }
9232 +#endif
9233 +
9234 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9235 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9236 + prev->context.user_cs_limit != next->context.user_cs_limit))
9237 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9238 #ifdef CONFIG_SMP
9239 + else if (unlikely(tlbstate != TLBSTATE_OK))
9240 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9241 +#endif
9242 +#endif
9243 +
9244 + }
9245 else {
9246 +
9247 +#ifdef CONFIG_PAX_PER_CPU_PGD
9248 + pax_open_kernel();
9249 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9250 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9251 + pax_close_kernel();
9252 + load_cr3(get_cpu_pgd(cpu));
9253 +#endif
9254 +
9255 +#ifdef CONFIG_SMP
9256 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9257 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9258
9259 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9260 * tlb flush IPI delivery. We must reload CR3
9261 * to make sure to use no freed page tables.
9262 */
9263 +
9264 +#ifndef CONFIG_PAX_PER_CPU_PGD
9265 load_cr3(next->pgd);
9266 +#endif
9267 +
9268 load_LDT_nolock(&next->context);
9269 +
9270 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9271 + if (!(__supported_pte_mask & _PAGE_NX))
9272 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9273 +#endif
9274 +
9275 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9276 +#ifdef CONFIG_PAX_PAGEEXEC
9277 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9278 +#endif
9279 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9280 +#endif
9281 +
9282 }
9283 +#endif
9284 }
9285 -#endif
9286 }
9287
9288 #define activate_mm(prev, next) \
9289 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9290 index 9eae775..c914fea 100644
9291 --- a/arch/x86/include/asm/module.h
9292 +++ b/arch/x86/include/asm/module.h
9293 @@ -5,6 +5,7 @@
9294
9295 #ifdef CONFIG_X86_64
9296 /* X86_64 does not define MODULE_PROC_FAMILY */
9297 +#define MODULE_PROC_FAMILY ""
9298 #elif defined CONFIG_M386
9299 #define MODULE_PROC_FAMILY "386 "
9300 #elif defined CONFIG_M486
9301 @@ -59,8 +60,20 @@
9302 #error unknown processor family
9303 #endif
9304
9305 -#ifdef CONFIG_X86_32
9306 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9307 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9308 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9309 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9310 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9311 +#else
9312 +#define MODULE_PAX_KERNEXEC ""
9313 #endif
9314
9315 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9316 +#define MODULE_PAX_UDEREF "UDEREF "
9317 +#else
9318 +#define MODULE_PAX_UDEREF ""
9319 +#endif
9320 +
9321 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9322 +
9323 #endif /* _ASM_X86_MODULE_H */
9324 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9325 index 7639dbf..e08a58c 100644
9326 --- a/arch/x86/include/asm/page_64_types.h
9327 +++ b/arch/x86/include/asm/page_64_types.h
9328 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9329
9330 /* duplicated to the one in bootmem.h */
9331 extern unsigned long max_pfn;
9332 -extern unsigned long phys_base;
9333 +extern const unsigned long phys_base;
9334
9335 extern unsigned long __phys_addr(unsigned long);
9336 #define __phys_reloc_hide(x) (x)
9337 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9338 index a7d2db9..edb023e 100644
9339 --- a/arch/x86/include/asm/paravirt.h
9340 +++ b/arch/x86/include/asm/paravirt.h
9341 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9342 val);
9343 }
9344
9345 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9346 +{
9347 + pgdval_t val = native_pgd_val(pgd);
9348 +
9349 + if (sizeof(pgdval_t) > sizeof(long))
9350 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9351 + val, (u64)val >> 32);
9352 + else
9353 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9354 + val);
9355 +}
9356 +
9357 static inline void pgd_clear(pgd_t *pgdp)
9358 {
9359 set_pgd(pgdp, __pgd(0));
9360 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9361 pv_mmu_ops.set_fixmap(idx, phys, flags);
9362 }
9363
9364 +#ifdef CONFIG_PAX_KERNEXEC
9365 +static inline unsigned long pax_open_kernel(void)
9366 +{
9367 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9368 +}
9369 +
9370 +static inline unsigned long pax_close_kernel(void)
9371 +{
9372 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9373 +}
9374 +#else
9375 +static inline unsigned long pax_open_kernel(void) { return 0; }
9376 +static inline unsigned long pax_close_kernel(void) { return 0; }
9377 +#endif
9378 +
9379 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9380
9381 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9382 @@ -964,7 +991,7 @@ extern void default_banner(void);
9383
9384 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9385 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9386 -#define PARA_INDIRECT(addr) *%cs:addr
9387 +#define PARA_INDIRECT(addr) *%ss:addr
9388 #endif
9389
9390 #define INTERRUPT_RETURN \
9391 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9392 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9393 CLBR_NONE, \
9394 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9395 +
9396 +#define GET_CR0_INTO_RDI \
9397 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9398 + mov %rax,%rdi
9399 +
9400 +#define SET_RDI_INTO_CR0 \
9401 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9402 +
9403 +#define GET_CR3_INTO_RDI \
9404 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9405 + mov %rax,%rdi
9406 +
9407 +#define SET_RDI_INTO_CR3 \
9408 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9409 +
9410 #endif /* CONFIG_X86_32 */
9411
9412 #endif /* __ASSEMBLY__ */
9413 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9414 index 8e8b9a4..f07d725 100644
9415 --- a/arch/x86/include/asm/paravirt_types.h
9416 +++ b/arch/x86/include/asm/paravirt_types.h
9417 @@ -84,20 +84,20 @@ struct pv_init_ops {
9418 */
9419 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9420 unsigned long addr, unsigned len);
9421 -};
9422 +} __no_const;
9423
9424
9425 struct pv_lazy_ops {
9426 /* Set deferred update mode, used for batching operations. */
9427 void (*enter)(void);
9428 void (*leave)(void);
9429 -};
9430 +} __no_const;
9431
9432 struct pv_time_ops {
9433 unsigned long long (*sched_clock)(void);
9434 unsigned long long (*steal_clock)(int cpu);
9435 unsigned long (*get_tsc_khz)(void);
9436 -};
9437 +} __no_const;
9438
9439 struct pv_cpu_ops {
9440 /* hooks for various privileged instructions */
9441 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9442
9443 void (*start_context_switch)(struct task_struct *prev);
9444 void (*end_context_switch)(struct task_struct *next);
9445 -};
9446 +} __no_const;
9447
9448 struct pv_irq_ops {
9449 /*
9450 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9451 unsigned long start_eip,
9452 unsigned long start_esp);
9453 #endif
9454 -};
9455 +} __no_const;
9456
9457 struct pv_mmu_ops {
9458 unsigned long (*read_cr2)(void);
9459 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9460 struct paravirt_callee_save make_pud;
9461
9462 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9463 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9464 #endif /* PAGETABLE_LEVELS == 4 */
9465 #endif /* PAGETABLE_LEVELS >= 3 */
9466
9467 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9468 an mfn. We can tell which is which from the index. */
9469 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9470 phys_addr_t phys, pgprot_t flags);
9471 +
9472 +#ifdef CONFIG_PAX_KERNEXEC
9473 + unsigned long (*pax_open_kernel)(void);
9474 + unsigned long (*pax_close_kernel)(void);
9475 +#endif
9476 +
9477 };
9478
9479 struct arch_spinlock;
9480 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9481 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9482 int (*spin_trylock)(struct arch_spinlock *lock);
9483 void (*spin_unlock)(struct arch_spinlock *lock);
9484 -};
9485 +} __no_const;
9486
9487 /* This contains all the paravirt structures: we get a convenient
9488 * number for each function using the offset which we use to indicate
9489 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9490 index b4389a4..b7ff22c 100644
9491 --- a/arch/x86/include/asm/pgalloc.h
9492 +++ b/arch/x86/include/asm/pgalloc.h
9493 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9494 pmd_t *pmd, pte_t *pte)
9495 {
9496 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9497 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9498 +}
9499 +
9500 +static inline void pmd_populate_user(struct mm_struct *mm,
9501 + pmd_t *pmd, pte_t *pte)
9502 +{
9503 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9504 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9505 }
9506
9507 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9508 index 98391db..8f6984e 100644
9509 --- a/arch/x86/include/asm/pgtable-2level.h
9510 +++ b/arch/x86/include/asm/pgtable-2level.h
9511 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9512
9513 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9514 {
9515 + pax_open_kernel();
9516 *pmdp = pmd;
9517 + pax_close_kernel();
9518 }
9519
9520 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9521 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9522 index effff47..f9e4035 100644
9523 --- a/arch/x86/include/asm/pgtable-3level.h
9524 +++ b/arch/x86/include/asm/pgtable-3level.h
9525 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9526
9527 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9528 {
9529 + pax_open_kernel();
9530 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9531 + pax_close_kernel();
9532 }
9533
9534 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9535 {
9536 + pax_open_kernel();
9537 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9538 + pax_close_kernel();
9539 }
9540
9541 /*
9542 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9543 index 18601c8..3d716d1 100644
9544 --- a/arch/x86/include/asm/pgtable.h
9545 +++ b/arch/x86/include/asm/pgtable.h
9546 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9547
9548 #ifndef __PAGETABLE_PUD_FOLDED
9549 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9550 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9551 #define pgd_clear(pgd) native_pgd_clear(pgd)
9552 #endif
9553
9554 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9555
9556 #define arch_end_context_switch(prev) do {} while(0)
9557
9558 +#define pax_open_kernel() native_pax_open_kernel()
9559 +#define pax_close_kernel() native_pax_close_kernel()
9560 #endif /* CONFIG_PARAVIRT */
9561
9562 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9563 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9564 +
9565 +#ifdef CONFIG_PAX_KERNEXEC
9566 +static inline unsigned long native_pax_open_kernel(void)
9567 +{
9568 + unsigned long cr0;
9569 +
9570 + preempt_disable();
9571 + barrier();
9572 + cr0 = read_cr0() ^ X86_CR0_WP;
9573 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9574 + write_cr0(cr0);
9575 + return cr0 ^ X86_CR0_WP;
9576 +}
9577 +
9578 +static inline unsigned long native_pax_close_kernel(void)
9579 +{
9580 + unsigned long cr0;
9581 +
9582 + cr0 = read_cr0() ^ X86_CR0_WP;
9583 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9584 + write_cr0(cr0);
9585 + barrier();
9586 + preempt_enable_no_resched();
9587 + return cr0 ^ X86_CR0_WP;
9588 +}
9589 +#else
9590 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9591 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9592 +#endif
9593 +
9594 /*
9595 * The following only work if pte_present() is true.
9596 * Undefined behaviour if not..
9597 */
9598 +static inline int pte_user(pte_t pte)
9599 +{
9600 + return pte_val(pte) & _PAGE_USER;
9601 +}
9602 +
9603 static inline int pte_dirty(pte_t pte)
9604 {
9605 return pte_flags(pte) & _PAGE_DIRTY;
9606 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9607 return pte_clear_flags(pte, _PAGE_RW);
9608 }
9609
9610 +static inline pte_t pte_mkread(pte_t pte)
9611 +{
9612 + return __pte(pte_val(pte) | _PAGE_USER);
9613 +}
9614 +
9615 static inline pte_t pte_mkexec(pte_t pte)
9616 {
9617 - return pte_clear_flags(pte, _PAGE_NX);
9618 +#ifdef CONFIG_X86_PAE
9619 + if (__supported_pte_mask & _PAGE_NX)
9620 + return pte_clear_flags(pte, _PAGE_NX);
9621 + else
9622 +#endif
9623 + return pte_set_flags(pte, _PAGE_USER);
9624 +}
9625 +
9626 +static inline pte_t pte_exprotect(pte_t pte)
9627 +{
9628 +#ifdef CONFIG_X86_PAE
9629 + if (__supported_pte_mask & _PAGE_NX)
9630 + return pte_set_flags(pte, _PAGE_NX);
9631 + else
9632 +#endif
9633 + return pte_clear_flags(pte, _PAGE_USER);
9634 }
9635
9636 static inline pte_t pte_mkdirty(pte_t pte)
9637 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9638 #endif
9639
9640 #ifndef __ASSEMBLY__
9641 +
9642 +#ifdef CONFIG_PAX_PER_CPU_PGD
9643 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9644 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9645 +{
9646 + return cpu_pgd[cpu];
9647 +}
9648 +#endif
9649 +
9650 #include <linux/mm_types.h>
9651
9652 static inline int pte_none(pte_t pte)
9653 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9654
9655 static inline int pgd_bad(pgd_t pgd)
9656 {
9657 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9658 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9659 }
9660
9661 static inline int pgd_none(pgd_t pgd)
9662 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9663 * pgd_offset() returns a (pgd_t *)
9664 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9665 */
9666 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9667 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9668 +
9669 +#ifdef CONFIG_PAX_PER_CPU_PGD
9670 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9671 +#endif
9672 +
9673 /*
9674 * a shortcut which implies the use of the kernel's pgd, instead
9675 * of a process's
9676 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9677 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9678 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9679
9680 +#ifdef CONFIG_X86_32
9681 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9682 +#else
9683 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9684 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9685 +
9686 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9687 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9688 +#else
9689 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9690 +#endif
9691 +
9692 +#endif
9693 +
9694 #ifndef __ASSEMBLY__
9695
9696 extern int direct_gbpages;
9697 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9698 * dst and src can be on the same page, but the range must not overlap,
9699 * and must not cross a page boundary.
9700 */
9701 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9702 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9703 {
9704 - memcpy(dst, src, count * sizeof(pgd_t));
9705 + pax_open_kernel();
9706 + while (count--)
9707 + *dst++ = *src++;
9708 + pax_close_kernel();
9709 }
9710
9711 +#ifdef CONFIG_PAX_PER_CPU_PGD
9712 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9713 +#endif
9714 +
9715 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9716 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9717 +#else
9718 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9719 +#endif
9720
9721 #include <asm-generic/pgtable.h>
9722 #endif /* __ASSEMBLY__ */
9723 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9724 index 0c92113..34a77c6 100644
9725 --- a/arch/x86/include/asm/pgtable_32.h
9726 +++ b/arch/x86/include/asm/pgtable_32.h
9727 @@ -25,9 +25,6 @@
9728 struct mm_struct;
9729 struct vm_area_struct;
9730
9731 -extern pgd_t swapper_pg_dir[1024];
9732 -extern pgd_t initial_page_table[1024];
9733 -
9734 static inline void pgtable_cache_init(void) { }
9735 static inline void check_pgt_cache(void) { }
9736 void paging_init(void);
9737 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9738 # include <asm/pgtable-2level.h>
9739 #endif
9740
9741 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9742 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9743 +#ifdef CONFIG_X86_PAE
9744 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9745 +#endif
9746 +
9747 #if defined(CONFIG_HIGHPTE)
9748 #define pte_offset_map(dir, address) \
9749 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9750 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9751 /* Clear a kernel PTE and flush it from the TLB */
9752 #define kpte_clear_flush(ptep, vaddr) \
9753 do { \
9754 + pax_open_kernel(); \
9755 pte_clear(&init_mm, (vaddr), (ptep)); \
9756 + pax_close_kernel(); \
9757 __flush_tlb_one((vaddr)); \
9758 } while (0)
9759
9760 @@ -74,6 +79,9 @@ do { \
9761
9762 #endif /* !__ASSEMBLY__ */
9763
9764 +#define HAVE_ARCH_UNMAPPED_AREA
9765 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9766 +
9767 /*
9768 * kern_addr_valid() is (1) for FLATMEM and (0) for
9769 * SPARSEMEM and DISCONTIGMEM
9770 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9771 index ed5903b..c7fe163 100644
9772 --- a/arch/x86/include/asm/pgtable_32_types.h
9773 +++ b/arch/x86/include/asm/pgtable_32_types.h
9774 @@ -8,7 +8,7 @@
9775 */
9776 #ifdef CONFIG_X86_PAE
9777 # include <asm/pgtable-3level_types.h>
9778 -# define PMD_SIZE (1UL << PMD_SHIFT)
9779 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9780 # define PMD_MASK (~(PMD_SIZE - 1))
9781 #else
9782 # include <asm/pgtable-2level_types.h>
9783 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9784 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9785 #endif
9786
9787 +#ifdef CONFIG_PAX_KERNEXEC
9788 +#ifndef __ASSEMBLY__
9789 +extern unsigned char MODULES_EXEC_VADDR[];
9790 +extern unsigned char MODULES_EXEC_END[];
9791 +#endif
9792 +#include <asm/boot.h>
9793 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9794 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9795 +#else
9796 +#define ktla_ktva(addr) (addr)
9797 +#define ktva_ktla(addr) (addr)
9798 +#endif
9799 +
9800 #define MODULES_VADDR VMALLOC_START
9801 #define MODULES_END VMALLOC_END
9802 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9803 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9804 index 975f709..107976d 100644
9805 --- a/arch/x86/include/asm/pgtable_64.h
9806 +++ b/arch/x86/include/asm/pgtable_64.h
9807 @@ -16,10 +16,14 @@
9808
9809 extern pud_t level3_kernel_pgt[512];
9810 extern pud_t level3_ident_pgt[512];
9811 +extern pud_t level3_vmalloc_start_pgt[512];
9812 +extern pud_t level3_vmalloc_end_pgt[512];
9813 +extern pud_t level3_vmemmap_pgt[512];
9814 +extern pud_t level2_vmemmap_pgt[512];
9815 extern pmd_t level2_kernel_pgt[512];
9816 extern pmd_t level2_fixmap_pgt[512];
9817 -extern pmd_t level2_ident_pgt[512];
9818 -extern pgd_t init_level4_pgt[];
9819 +extern pmd_t level2_ident_pgt[512*2];
9820 +extern pgd_t init_level4_pgt[512];
9821
9822 #define swapper_pg_dir init_level4_pgt
9823
9824 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9825
9826 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9827 {
9828 + pax_open_kernel();
9829 *pmdp = pmd;
9830 + pax_close_kernel();
9831 }
9832
9833 static inline void native_pmd_clear(pmd_t *pmd)
9834 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9835
9836 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9837 {
9838 + pax_open_kernel();
9839 + *pgdp = pgd;
9840 + pax_close_kernel();
9841 +}
9842 +
9843 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9844 +{
9845 *pgdp = pgd;
9846 }
9847
9848 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9849 index 766ea16..5b96cb3 100644
9850 --- a/arch/x86/include/asm/pgtable_64_types.h
9851 +++ b/arch/x86/include/asm/pgtable_64_types.h
9852 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9853 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9854 #define MODULES_END _AC(0xffffffffff000000, UL)
9855 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9856 +#define MODULES_EXEC_VADDR MODULES_VADDR
9857 +#define MODULES_EXEC_END MODULES_END
9858 +
9859 +#define ktla_ktva(addr) (addr)
9860 +#define ktva_ktla(addr) (addr)
9861
9862 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9863 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9864 index 013286a..8b42f4f 100644
9865 --- a/arch/x86/include/asm/pgtable_types.h
9866 +++ b/arch/x86/include/asm/pgtable_types.h
9867 @@ -16,13 +16,12 @@
9868 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9869 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9870 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9871 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9872 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9873 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9874 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9875 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9876 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9877 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9878 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9879 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9880 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9881 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9882
9883 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9884 @@ -40,7 +39,6 @@
9885 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9886 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9887 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9888 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9889 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9890 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9891 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9892 @@ -57,8 +55,10 @@
9893
9894 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9895 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9896 -#else
9897 +#elif defined(CONFIG_KMEMCHECK)
9898 #define _PAGE_NX (_AT(pteval_t, 0))
9899 +#else
9900 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9901 #endif
9902
9903 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9904 @@ -96,6 +96,9 @@
9905 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9906 _PAGE_ACCESSED)
9907
9908 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9909 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9910 +
9911 #define __PAGE_KERNEL_EXEC \
9912 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9913 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9914 @@ -106,7 +109,7 @@
9915 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9916 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9917 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9918 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9919 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9920 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9921 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9922 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9923 @@ -168,8 +171,8 @@
9924 * bits are combined, this will alow user to access the high address mapped
9925 * VDSO in the presence of CONFIG_COMPAT_VDSO
9926 */
9927 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9928 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9929 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9930 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9931 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9932 #endif
9933
9934 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9935 {
9936 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9937 }
9938 +#endif
9939
9940 +#if PAGETABLE_LEVELS == 3
9941 +#include <asm-generic/pgtable-nopud.h>
9942 +#endif
9943 +
9944 +#if PAGETABLE_LEVELS == 2
9945 +#include <asm-generic/pgtable-nopmd.h>
9946 +#endif
9947 +
9948 +#ifndef __ASSEMBLY__
9949 #if PAGETABLE_LEVELS > 3
9950 typedef struct { pudval_t pud; } pud_t;
9951
9952 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9953 return pud.pud;
9954 }
9955 #else
9956 -#include <asm-generic/pgtable-nopud.h>
9957 -
9958 static inline pudval_t native_pud_val(pud_t pud)
9959 {
9960 return native_pgd_val(pud.pgd);
9961 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9962 return pmd.pmd;
9963 }
9964 #else
9965 -#include <asm-generic/pgtable-nopmd.h>
9966 -
9967 static inline pmdval_t native_pmd_val(pmd_t pmd)
9968 {
9969 return native_pgd_val(pmd.pud.pgd);
9970 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9971
9972 extern pteval_t __supported_pte_mask;
9973 extern void set_nx(void);
9974 -extern int nx_enabled;
9975
9976 #define pgprot_writecombine pgprot_writecombine
9977 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9978 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9979 index b650435..eefa566 100644
9980 --- a/arch/x86/include/asm/processor.h
9981 +++ b/arch/x86/include/asm/processor.h
9982 @@ -268,7 +268,7 @@ struct tss_struct {
9983
9984 } ____cacheline_aligned;
9985
9986 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9987 +extern struct tss_struct init_tss[NR_CPUS];
9988
9989 /*
9990 * Save the original ist values for checking stack pointers during debugging
9991 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9992 */
9993 #define TASK_SIZE PAGE_OFFSET
9994 #define TASK_SIZE_MAX TASK_SIZE
9995 +
9996 +#ifdef CONFIG_PAX_SEGMEXEC
9997 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9998 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9999 +#else
10000 #define STACK_TOP TASK_SIZE
10001 -#define STACK_TOP_MAX STACK_TOP
10002 +#endif
10003 +
10004 +#define STACK_TOP_MAX TASK_SIZE
10005
10006 #define INIT_THREAD { \
10007 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10008 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10009 .vm86_info = NULL, \
10010 .sysenter_cs = __KERNEL_CS, \
10011 .io_bitmap_ptr = NULL, \
10012 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
10013 */
10014 #define INIT_TSS { \
10015 .x86_tss = { \
10016 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10017 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10018 .ss0 = __KERNEL_DS, \
10019 .ss1 = __KERNEL_CS, \
10020 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10021 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
10022 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10023
10024 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10025 -#define KSTK_TOP(info) \
10026 -({ \
10027 - unsigned long *__ptr = (unsigned long *)(info); \
10028 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10029 -})
10030 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10031
10032 /*
10033 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10034 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10035 #define task_pt_regs(task) \
10036 ({ \
10037 struct pt_regs *__regs__; \
10038 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10039 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10040 __regs__ - 1; \
10041 })
10042
10043 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10044 /*
10045 * User space process size. 47bits minus one guard page.
10046 */
10047 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10048 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10049
10050 /* This decides where the kernel will search for a free chunk of vm
10051 * space during mmap's.
10052 */
10053 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10054 - 0xc0000000 : 0xFFFFe000)
10055 + 0xc0000000 : 0xFFFFf000)
10056
10057 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10058 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10059 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10060 #define STACK_TOP_MAX TASK_SIZE_MAX
10061
10062 #define INIT_THREAD { \
10063 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10064 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10065 }
10066
10067 #define INIT_TSS { \
10068 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10069 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10070 }
10071
10072 /*
10073 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10074 */
10075 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10076
10077 +#ifdef CONFIG_PAX_SEGMEXEC
10078 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10079 +#endif
10080 +
10081 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10082
10083 /* Get/set a process' ability to use the timestamp counter instruction */
10084 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
10085 index 3566454..4bdfb8c 100644
10086 --- a/arch/x86/include/asm/ptrace.h
10087 +++ b/arch/x86/include/asm/ptrace.h
10088 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10089 }
10090
10091 /*
10092 - * user_mode_vm(regs) determines whether a register set came from user mode.
10093 + * user_mode(regs) determines whether a register set came from user mode.
10094 * This is true if V8086 mode was enabled OR if the register set was from
10095 * protected mode with RPL-3 CS value. This tricky test checks that with
10096 * one comparison. Many places in the kernel can bypass this full check
10097 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10098 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10099 + * be used.
10100 */
10101 -static inline int user_mode(struct pt_regs *regs)
10102 +static inline int user_mode_novm(struct pt_regs *regs)
10103 {
10104 #ifdef CONFIG_X86_32
10105 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10106 #else
10107 - return !!(regs->cs & 3);
10108 + return !!(regs->cs & SEGMENT_RPL_MASK);
10109 #endif
10110 }
10111
10112 -static inline int user_mode_vm(struct pt_regs *regs)
10113 +static inline int user_mode(struct pt_regs *regs)
10114 {
10115 #ifdef CONFIG_X86_32
10116 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10117 USER_RPL;
10118 #else
10119 - return user_mode(regs);
10120 + return user_mode_novm(regs);
10121 #endif
10122 }
10123
10124 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10125 #ifdef CONFIG_X86_64
10126 static inline bool user_64bit_mode(struct pt_regs *regs)
10127 {
10128 + unsigned long cs = regs->cs & 0xffff;
10129 #ifndef CONFIG_PARAVIRT
10130 /*
10131 * On non-paravirt systems, this is the only long mode CPL 3
10132 * selector. We do not allow long mode selectors in the LDT.
10133 */
10134 - return regs->cs == __USER_CS;
10135 + return cs == __USER_CS;
10136 #else
10137 /* Headers are too twisted for this to go in paravirt.h. */
10138 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
10139 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
10140 #endif
10141 }
10142 #endif
10143 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
10144 index 92f29706..a79cbbb 100644
10145 --- a/arch/x86/include/asm/reboot.h
10146 +++ b/arch/x86/include/asm/reboot.h
10147 @@ -6,19 +6,19 @@
10148 struct pt_regs;
10149
10150 struct machine_ops {
10151 - void (*restart)(char *cmd);
10152 - void (*halt)(void);
10153 - void (*power_off)(void);
10154 + void (* __noreturn restart)(char *cmd);
10155 + void (* __noreturn halt)(void);
10156 + void (* __noreturn power_off)(void);
10157 void (*shutdown)(void);
10158 void (*crash_shutdown)(struct pt_regs *);
10159 - void (*emergency_restart)(void);
10160 -};
10161 + void (* __noreturn emergency_restart)(void);
10162 +} __no_const;
10163
10164 extern struct machine_ops machine_ops;
10165
10166 void native_machine_crash_shutdown(struct pt_regs *regs);
10167 void native_machine_shutdown(void);
10168 -void machine_real_restart(unsigned int type);
10169 +void machine_real_restart(unsigned int type) __noreturn;
10170 /* These must match dispatch_table in reboot_32.S */
10171 #define MRR_BIOS 0
10172 #define MRR_APM 1
10173 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
10174 index 2dbe4a7..ce1db00 100644
10175 --- a/arch/x86/include/asm/rwsem.h
10176 +++ b/arch/x86/include/asm/rwsem.h
10177 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
10178 {
10179 asm volatile("# beginning down_read\n\t"
10180 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10181 +
10182 +#ifdef CONFIG_PAX_REFCOUNT
10183 + "jno 0f\n"
10184 + LOCK_PREFIX _ASM_DEC "(%1)\n"
10185 + "int $4\n0:\n"
10186 + _ASM_EXTABLE(0b, 0b)
10187 +#endif
10188 +
10189 /* adds 0x00000001 */
10190 " jns 1f\n"
10191 " call call_rwsem_down_read_failed\n"
10192 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
10193 "1:\n\t"
10194 " mov %1,%2\n\t"
10195 " add %3,%2\n\t"
10196 +
10197 +#ifdef CONFIG_PAX_REFCOUNT
10198 + "jno 0f\n"
10199 + "sub %3,%2\n"
10200 + "int $4\n0:\n"
10201 + _ASM_EXTABLE(0b, 0b)
10202 +#endif
10203 +
10204 " jle 2f\n\t"
10205 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10206 " jnz 1b\n\t"
10207 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
10208 long tmp;
10209 asm volatile("# beginning down_write\n\t"
10210 LOCK_PREFIX " xadd %1,(%2)\n\t"
10211 +
10212 +#ifdef CONFIG_PAX_REFCOUNT
10213 + "jno 0f\n"
10214 + "mov %1,(%2)\n"
10215 + "int $4\n0:\n"
10216 + _ASM_EXTABLE(0b, 0b)
10217 +#endif
10218 +
10219 /* adds 0xffff0001, returns the old value */
10220 " test %1,%1\n\t"
10221 /* was the count 0 before? */
10222 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
10223 long tmp;
10224 asm volatile("# beginning __up_read\n\t"
10225 LOCK_PREFIX " xadd %1,(%2)\n\t"
10226 +
10227 +#ifdef CONFIG_PAX_REFCOUNT
10228 + "jno 0f\n"
10229 + "mov %1,(%2)\n"
10230 + "int $4\n0:\n"
10231 + _ASM_EXTABLE(0b, 0b)
10232 +#endif
10233 +
10234 /* subtracts 1, returns the old value */
10235 " jns 1f\n\t"
10236 " call call_rwsem_wake\n" /* expects old value in %edx */
10237 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
10238 long tmp;
10239 asm volatile("# beginning __up_write\n\t"
10240 LOCK_PREFIX " xadd %1,(%2)\n\t"
10241 +
10242 +#ifdef CONFIG_PAX_REFCOUNT
10243 + "jno 0f\n"
10244 + "mov %1,(%2)\n"
10245 + "int $4\n0:\n"
10246 + _ASM_EXTABLE(0b, 0b)
10247 +#endif
10248 +
10249 /* subtracts 0xffff0001, returns the old value */
10250 " jns 1f\n\t"
10251 " call call_rwsem_wake\n" /* expects old value in %edx */
10252 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10253 {
10254 asm volatile("# beginning __downgrade_write\n\t"
10255 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10256 +
10257 +#ifdef CONFIG_PAX_REFCOUNT
10258 + "jno 0f\n"
10259 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10260 + "int $4\n0:\n"
10261 + _ASM_EXTABLE(0b, 0b)
10262 +#endif
10263 +
10264 /*
10265 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10266 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10267 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10268 */
10269 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10270 {
10271 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10272 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10273 +
10274 +#ifdef CONFIG_PAX_REFCOUNT
10275 + "jno 0f\n"
10276 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10277 + "int $4\n0:\n"
10278 + _ASM_EXTABLE(0b, 0b)
10279 +#endif
10280 +
10281 : "+m" (sem->count)
10282 : "er" (delta));
10283 }
10284 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10285 */
10286 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10287 {
10288 - return delta + xadd(&sem->count, delta);
10289 + return delta + xadd_check_overflow(&sem->count, delta);
10290 }
10291
10292 #endif /* __KERNEL__ */
10293 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10294 index 5e64171..f58957e 100644
10295 --- a/arch/x86/include/asm/segment.h
10296 +++ b/arch/x86/include/asm/segment.h
10297 @@ -64,10 +64,15 @@
10298 * 26 - ESPFIX small SS
10299 * 27 - per-cpu [ offset to per-cpu data area ]
10300 * 28 - stack_canary-20 [ for stack protector ]
10301 - * 29 - unused
10302 - * 30 - unused
10303 + * 29 - PCI BIOS CS
10304 + * 30 - PCI BIOS DS
10305 * 31 - TSS for double fault handler
10306 */
10307 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10308 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10309 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10310 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10311 +
10312 #define GDT_ENTRY_TLS_MIN 6
10313 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10314
10315 @@ -79,6 +84,8 @@
10316
10317 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10318
10319 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10320 +
10321 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10322
10323 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10324 @@ -104,6 +111,12 @@
10325 #define __KERNEL_STACK_CANARY 0
10326 #endif
10327
10328 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10329 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10330 +
10331 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10332 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10333 +
10334 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10335
10336 /*
10337 @@ -141,7 +154,7 @@
10338 */
10339
10340 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10341 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10342 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10343
10344
10345 #else
10346 @@ -165,6 +178,8 @@
10347 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10348 #define __USER32_DS __USER_DS
10349
10350 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10351 +
10352 #define GDT_ENTRY_TSS 8 /* needs two entries */
10353 #define GDT_ENTRY_LDT 10 /* needs two entries */
10354 #define GDT_ENTRY_TLS_MIN 12
10355 @@ -185,6 +200,7 @@
10356 #endif
10357
10358 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10359 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10360 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10361 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10362 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10363 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10364 index 73b11bc..d4a3b63 100644
10365 --- a/arch/x86/include/asm/smp.h
10366 +++ b/arch/x86/include/asm/smp.h
10367 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10368 /* cpus sharing the last level cache: */
10369 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10370 DECLARE_PER_CPU(u16, cpu_llc_id);
10371 -DECLARE_PER_CPU(int, cpu_number);
10372 +DECLARE_PER_CPU(unsigned int, cpu_number);
10373
10374 static inline struct cpumask *cpu_sibling_mask(int cpu)
10375 {
10376 @@ -77,7 +77,7 @@ struct smp_ops {
10377
10378 void (*send_call_func_ipi)(const struct cpumask *mask);
10379 void (*send_call_func_single_ipi)(int cpu);
10380 -};
10381 +} __no_const;
10382
10383 /* Globals due to paravirt */
10384 extern void set_cpu_sibling_map(int cpu);
10385 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10386 extern int safe_smp_processor_id(void);
10387
10388 #elif defined(CONFIG_X86_64_SMP)
10389 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10390 -
10391 -#define stack_smp_processor_id() \
10392 -({ \
10393 - struct thread_info *ti; \
10394 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10395 - ti->cpu; \
10396 -})
10397 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10398 +#define stack_smp_processor_id() raw_smp_processor_id()
10399 #define safe_smp_processor_id() smp_processor_id()
10400
10401 #endif
10402 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10403 index 972c260..43ab1fd 100644
10404 --- a/arch/x86/include/asm/spinlock.h
10405 +++ b/arch/x86/include/asm/spinlock.h
10406 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10407 static inline void arch_read_lock(arch_rwlock_t *rw)
10408 {
10409 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10410 +
10411 +#ifdef CONFIG_PAX_REFCOUNT
10412 + "jno 0f\n"
10413 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10414 + "int $4\n0:\n"
10415 + _ASM_EXTABLE(0b, 0b)
10416 +#endif
10417 +
10418 "jns 1f\n"
10419 "call __read_lock_failed\n\t"
10420 "1:\n"
10421 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10422 static inline void arch_write_lock(arch_rwlock_t *rw)
10423 {
10424 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10425 +
10426 +#ifdef CONFIG_PAX_REFCOUNT
10427 + "jno 0f\n"
10428 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10429 + "int $4\n0:\n"
10430 + _ASM_EXTABLE(0b, 0b)
10431 +#endif
10432 +
10433 "jz 1f\n"
10434 "call __write_lock_failed\n\t"
10435 "1:\n"
10436 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10437
10438 static inline void arch_read_unlock(arch_rwlock_t *rw)
10439 {
10440 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10441 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10442 +
10443 +#ifdef CONFIG_PAX_REFCOUNT
10444 + "jno 0f\n"
10445 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10446 + "int $4\n0:\n"
10447 + _ASM_EXTABLE(0b, 0b)
10448 +#endif
10449 +
10450 :"+m" (rw->lock) : : "memory");
10451 }
10452
10453 static inline void arch_write_unlock(arch_rwlock_t *rw)
10454 {
10455 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10456 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10457 +
10458 +#ifdef CONFIG_PAX_REFCOUNT
10459 + "jno 0f\n"
10460 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10461 + "int $4\n0:\n"
10462 + _ASM_EXTABLE(0b, 0b)
10463 +#endif
10464 +
10465 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10466 }
10467
10468 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10469 index 1575177..cb23f52 100644
10470 --- a/arch/x86/include/asm/stackprotector.h
10471 +++ b/arch/x86/include/asm/stackprotector.h
10472 @@ -48,7 +48,7 @@
10473 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10474 */
10475 #define GDT_STACK_CANARY_INIT \
10476 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10477 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10478
10479 /*
10480 * Initialize the stackprotector canary value.
10481 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10482
10483 static inline void load_stack_canary_segment(void)
10484 {
10485 -#ifdef CONFIG_X86_32
10486 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10487 asm volatile ("mov %0, %%gs" : : "r" (0));
10488 #endif
10489 }
10490 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10491 index 70bbe39..4ae2bd4 100644
10492 --- a/arch/x86/include/asm/stacktrace.h
10493 +++ b/arch/x86/include/asm/stacktrace.h
10494 @@ -11,28 +11,20 @@
10495
10496 extern int kstack_depth_to_print;
10497
10498 -struct thread_info;
10499 +struct task_struct;
10500 struct stacktrace_ops;
10501
10502 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10503 - unsigned long *stack,
10504 - unsigned long bp,
10505 - const struct stacktrace_ops *ops,
10506 - void *data,
10507 - unsigned long *end,
10508 - int *graph);
10509 +typedef unsigned long walk_stack_t(struct task_struct *task,
10510 + void *stack_start,
10511 + unsigned long *stack,
10512 + unsigned long bp,
10513 + const struct stacktrace_ops *ops,
10514 + void *data,
10515 + unsigned long *end,
10516 + int *graph);
10517
10518 -extern unsigned long
10519 -print_context_stack(struct thread_info *tinfo,
10520 - unsigned long *stack, unsigned long bp,
10521 - const struct stacktrace_ops *ops, void *data,
10522 - unsigned long *end, int *graph);
10523 -
10524 -extern unsigned long
10525 -print_context_stack_bp(struct thread_info *tinfo,
10526 - unsigned long *stack, unsigned long bp,
10527 - const struct stacktrace_ops *ops, void *data,
10528 - unsigned long *end, int *graph);
10529 +extern walk_stack_t print_context_stack;
10530 +extern walk_stack_t print_context_stack_bp;
10531
10532 /* Generic stack tracer with callbacks */
10533
10534 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10535 void (*address)(void *data, unsigned long address, int reliable);
10536 /* On negative return stop dumping */
10537 int (*stack)(void *data, char *name);
10538 - walk_stack_t walk_stack;
10539 + walk_stack_t *walk_stack;
10540 };
10541
10542 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10543 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10544 index cb23852..2dde194 100644
10545 --- a/arch/x86/include/asm/sys_ia32.h
10546 +++ b/arch/x86/include/asm/sys_ia32.h
10547 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10548 compat_sigset_t __user *, unsigned int);
10549 asmlinkage long sys32_alarm(unsigned int);
10550
10551 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10552 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10553 asmlinkage long sys32_sysfs(int, u32, u32);
10554
10555 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10556 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10557 index 2d2f01c..f985723 100644
10558 --- a/arch/x86/include/asm/system.h
10559 +++ b/arch/x86/include/asm/system.h
10560 @@ -129,7 +129,7 @@ do { \
10561 "call __switch_to\n\t" \
10562 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10563 __switch_canary \
10564 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10565 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10566 "movq %%rax,%%rdi\n\t" \
10567 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10568 "jnz ret_from_fork\n\t" \
10569 @@ -140,7 +140,7 @@ do { \
10570 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10571 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10572 [_tif_fork] "i" (_TIF_FORK), \
10573 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10574 + [thread_info] "m" (current_tinfo), \
10575 [current_task] "m" (current_task) \
10576 __switch_canary_iparam \
10577 : "memory", "cc" __EXTRA_CLOBBER)
10578 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10579 {
10580 unsigned long __limit;
10581 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10582 - return __limit + 1;
10583 + return __limit;
10584 }
10585
10586 static inline void native_clts(void)
10587 @@ -397,13 +397,13 @@ void enable_hlt(void);
10588
10589 void cpu_idle_wait(void);
10590
10591 -extern unsigned long arch_align_stack(unsigned long sp);
10592 +#define arch_align_stack(x) ((x) & ~0xfUL)
10593 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10594
10595 void default_idle(void);
10596 bool set_pm_idle_to_default(void);
10597
10598 -void stop_this_cpu(void *dummy);
10599 +void stop_this_cpu(void *dummy) __noreturn;
10600
10601 /*
10602 * Force strict CPU ordering.
10603 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10604 index a1fe5c1..ee326d8 100644
10605 --- a/arch/x86/include/asm/thread_info.h
10606 +++ b/arch/x86/include/asm/thread_info.h
10607 @@ -10,6 +10,7 @@
10608 #include <linux/compiler.h>
10609 #include <asm/page.h>
10610 #include <asm/types.h>
10611 +#include <asm/percpu.h>
10612
10613 /*
10614 * low level task data that entry.S needs immediate access to
10615 @@ -24,7 +25,6 @@ struct exec_domain;
10616 #include <linux/atomic.h>
10617
10618 struct thread_info {
10619 - struct task_struct *task; /* main task structure */
10620 struct exec_domain *exec_domain; /* execution domain */
10621 __u32 flags; /* low level flags */
10622 __u32 status; /* thread synchronous flags */
10623 @@ -34,18 +34,12 @@ struct thread_info {
10624 mm_segment_t addr_limit;
10625 struct restart_block restart_block;
10626 void __user *sysenter_return;
10627 -#ifdef CONFIG_X86_32
10628 - unsigned long previous_esp; /* ESP of the previous stack in
10629 - case of nested (IRQ) stacks
10630 - */
10631 - __u8 supervisor_stack[0];
10632 -#endif
10633 + unsigned long lowest_stack;
10634 int uaccess_err;
10635 };
10636
10637 -#define INIT_THREAD_INFO(tsk) \
10638 +#define INIT_THREAD_INFO \
10639 { \
10640 - .task = &tsk, \
10641 .exec_domain = &default_exec_domain, \
10642 .flags = 0, \
10643 .cpu = 0, \
10644 @@ -56,7 +50,7 @@ struct thread_info {
10645 }, \
10646 }
10647
10648 -#define init_thread_info (init_thread_union.thread_info)
10649 +#define init_thread_info (init_thread_union.stack)
10650 #define init_stack (init_thread_union.stack)
10651
10652 #else /* !__ASSEMBLY__ */
10653 @@ -170,45 +164,40 @@ struct thread_info {
10654 ret; \
10655 })
10656
10657 -#ifdef CONFIG_X86_32
10658 -
10659 -#define STACK_WARN (THREAD_SIZE/8)
10660 -/*
10661 - * macros/functions for gaining access to the thread information structure
10662 - *
10663 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10664 - */
10665 -#ifndef __ASSEMBLY__
10666 -
10667 -
10668 -/* how to get the current stack pointer from C */
10669 -register unsigned long current_stack_pointer asm("esp") __used;
10670 -
10671 -/* how to get the thread information struct from C */
10672 -static inline struct thread_info *current_thread_info(void)
10673 -{
10674 - return (struct thread_info *)
10675 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10676 -}
10677 -
10678 -#else /* !__ASSEMBLY__ */
10679 -
10680 +#ifdef __ASSEMBLY__
10681 /* how to get the thread information struct from ASM */
10682 #define GET_THREAD_INFO(reg) \
10683 - movl $-THREAD_SIZE, reg; \
10684 - andl %esp, reg
10685 + mov PER_CPU_VAR(current_tinfo), reg
10686
10687 /* use this one if reg already contains %esp */
10688 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10689 - andl $-THREAD_SIZE, reg
10690 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10691 +#else
10692 +/* how to get the thread information struct from C */
10693 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10694 +
10695 +static __always_inline struct thread_info *current_thread_info(void)
10696 +{
10697 + return percpu_read_stable(current_tinfo);
10698 +}
10699 +#endif
10700 +
10701 +#ifdef CONFIG_X86_32
10702 +
10703 +#define STACK_WARN (THREAD_SIZE/8)
10704 +/*
10705 + * macros/functions for gaining access to the thread information structure
10706 + *
10707 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10708 + */
10709 +#ifndef __ASSEMBLY__
10710 +
10711 +/* how to get the current stack pointer from C */
10712 +register unsigned long current_stack_pointer asm("esp") __used;
10713
10714 #endif
10715
10716 #else /* X86_32 */
10717
10718 -#include <asm/percpu.h>
10719 -#define KERNEL_STACK_OFFSET (5*8)
10720 -
10721 /*
10722 * macros/functions for gaining access to the thread information structure
10723 * preempt_count needs to be 1 initially, until the scheduler is functional.
10724 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10725 #ifndef __ASSEMBLY__
10726 DECLARE_PER_CPU(unsigned long, kernel_stack);
10727
10728 -static inline struct thread_info *current_thread_info(void)
10729 -{
10730 - struct thread_info *ti;
10731 - ti = (void *)(percpu_read_stable(kernel_stack) +
10732 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10733 - return ti;
10734 -}
10735 -
10736 -#else /* !__ASSEMBLY__ */
10737 -
10738 -/* how to get the thread information struct from ASM */
10739 -#define GET_THREAD_INFO(reg) \
10740 - movq PER_CPU_VAR(kernel_stack),reg ; \
10741 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10742 -
10743 +/* how to get the current stack pointer from C */
10744 +register unsigned long current_stack_pointer asm("rsp") __used;
10745 #endif
10746
10747 #endif /* !X86_32 */
10748 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10749 extern void free_thread_info(struct thread_info *ti);
10750 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10751 #define arch_task_cache_init arch_task_cache_init
10752 +
10753 +#define __HAVE_THREAD_FUNCTIONS
10754 +#define task_thread_info(task) (&(task)->tinfo)
10755 +#define task_stack_page(task) ((task)->stack)
10756 +#define setup_thread_stack(p, org) do {} while (0)
10757 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10758 +
10759 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10760 +extern struct task_struct *alloc_task_struct_node(int node);
10761 +extern void free_task_struct(struct task_struct *);
10762 +
10763 #endif
10764 #endif /* _ASM_X86_THREAD_INFO_H */
10765 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10766 index 36361bf..324f262 100644
10767 --- a/arch/x86/include/asm/uaccess.h
10768 +++ b/arch/x86/include/asm/uaccess.h
10769 @@ -7,12 +7,15 @@
10770 #include <linux/compiler.h>
10771 #include <linux/thread_info.h>
10772 #include <linux/string.h>
10773 +#include <linux/sched.h>
10774 #include <asm/asm.h>
10775 #include <asm/page.h>
10776
10777 #define VERIFY_READ 0
10778 #define VERIFY_WRITE 1
10779
10780 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10781 +
10782 /*
10783 * The fs value determines whether argument validity checking should be
10784 * performed or not. If get_fs() == USER_DS, checking is performed, with
10785 @@ -28,7 +31,12 @@
10786
10787 #define get_ds() (KERNEL_DS)
10788 #define get_fs() (current_thread_info()->addr_limit)
10789 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10790 +void __set_fs(mm_segment_t x);
10791 +void set_fs(mm_segment_t x);
10792 +#else
10793 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10794 +#endif
10795
10796 #define segment_eq(a, b) ((a).seg == (b).seg)
10797
10798 @@ -76,7 +84,33 @@
10799 * checks that the pointer is in the user space range - after calling
10800 * this function, memory access functions may still return -EFAULT.
10801 */
10802 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10803 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10804 +#define access_ok(type, addr, size) \
10805 +({ \
10806 + long __size = size; \
10807 + unsigned long __addr = (unsigned long)addr; \
10808 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10809 + unsigned long __end_ao = __addr + __size - 1; \
10810 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10811 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10812 + while(__addr_ao <= __end_ao) { \
10813 + char __c_ao; \
10814 + __addr_ao += PAGE_SIZE; \
10815 + if (__size > PAGE_SIZE) \
10816 + cond_resched(); \
10817 + if (__get_user(__c_ao, (char __user *)__addr)) \
10818 + break; \
10819 + if (type != VERIFY_WRITE) { \
10820 + __addr = __addr_ao; \
10821 + continue; \
10822 + } \
10823 + if (__put_user(__c_ao, (char __user *)__addr)) \
10824 + break; \
10825 + __addr = __addr_ao; \
10826 + } \
10827 + } \
10828 + __ret_ao; \
10829 +})
10830
10831 /*
10832 * The exception table consists of pairs of addresses: the first is the
10833 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10834 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10835 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10836
10837 -
10838 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10839 +#define __copyuser_seg "gs;"
10840 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10841 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10842 +#else
10843 +#define __copyuser_seg
10844 +#define __COPYUSER_SET_ES
10845 +#define __COPYUSER_RESTORE_ES
10846 +#endif
10847
10848 #ifdef CONFIG_X86_32
10849 #define __put_user_asm_u64(x, addr, err, errret) \
10850 - asm volatile("1: movl %%eax,0(%2)\n" \
10851 - "2: movl %%edx,4(%2)\n" \
10852 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10853 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10854 "3:\n" \
10855 ".section .fixup,\"ax\"\n" \
10856 "4: movl %3,%0\n" \
10857 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10858 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10859
10860 #define __put_user_asm_ex_u64(x, addr) \
10861 - asm volatile("1: movl %%eax,0(%1)\n" \
10862 - "2: movl %%edx,4(%1)\n" \
10863 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10864 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10865 "3:\n" \
10866 _ASM_EXTABLE(1b, 2b - 1b) \
10867 _ASM_EXTABLE(2b, 3b - 2b) \
10868 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10869 __typeof__(*(ptr)) __pu_val; \
10870 __chk_user_ptr(ptr); \
10871 might_fault(); \
10872 - __pu_val = x; \
10873 + __pu_val = (x); \
10874 switch (sizeof(*(ptr))) { \
10875 case 1: \
10876 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10877 @@ -373,7 +415,7 @@ do { \
10878 } while (0)
10879
10880 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10881 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10882 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10883 "2:\n" \
10884 ".section .fixup,\"ax\"\n" \
10885 "3: mov %3,%0\n" \
10886 @@ -381,7 +423,7 @@ do { \
10887 " jmp 2b\n" \
10888 ".previous\n" \
10889 _ASM_EXTABLE(1b, 3b) \
10890 - : "=r" (err), ltype(x) \
10891 + : "=r" (err), ltype (x) \
10892 : "m" (__m(addr)), "i" (errret), "0" (err))
10893
10894 #define __get_user_size_ex(x, ptr, size) \
10895 @@ -406,7 +448,7 @@ do { \
10896 } while (0)
10897
10898 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10899 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10900 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10901 "2:\n" \
10902 _ASM_EXTABLE(1b, 2b - 1b) \
10903 : ltype(x) : "m" (__m(addr)))
10904 @@ -423,13 +465,24 @@ do { \
10905 int __gu_err; \
10906 unsigned long __gu_val; \
10907 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10908 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10909 + (x) = (__typeof__(*(ptr)))__gu_val; \
10910 __gu_err; \
10911 })
10912
10913 /* FIXME: this hack is definitely wrong -AK */
10914 struct __large_struct { unsigned long buf[100]; };
10915 -#define __m(x) (*(struct __large_struct __user *)(x))
10916 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10917 +#define ____m(x) \
10918 +({ \
10919 + unsigned long ____x = (unsigned long)(x); \
10920 + if (____x < PAX_USER_SHADOW_BASE) \
10921 + ____x += PAX_USER_SHADOW_BASE; \
10922 + (void __user *)____x; \
10923 +})
10924 +#else
10925 +#define ____m(x) (x)
10926 +#endif
10927 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10928
10929 /*
10930 * Tell gcc we read from memory instead of writing: this is because
10931 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10932 * aliasing issues.
10933 */
10934 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10935 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10936 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10937 "2:\n" \
10938 ".section .fixup,\"ax\"\n" \
10939 "3: mov %3,%0\n" \
10940 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10941 ".previous\n" \
10942 _ASM_EXTABLE(1b, 3b) \
10943 : "=r"(err) \
10944 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10945 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10946
10947 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10948 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10949 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10950 "2:\n" \
10951 _ASM_EXTABLE(1b, 2b - 1b) \
10952 : : ltype(x), "m" (__m(addr)))
10953 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10954 * On error, the variable @x is set to zero.
10955 */
10956
10957 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10958 +#define __get_user(x, ptr) get_user((x), (ptr))
10959 +#else
10960 #define __get_user(x, ptr) \
10961 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10962 +#endif
10963
10964 /**
10965 * __put_user: - Write a simple value into user space, with less checking.
10966 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10967 * Returns zero on success, or -EFAULT on error.
10968 */
10969
10970 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10971 +#define __put_user(x, ptr) put_user((x), (ptr))
10972 +#else
10973 #define __put_user(x, ptr) \
10974 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10975 +#endif
10976
10977 #define __get_user_unaligned __get_user
10978 #define __put_user_unaligned __put_user
10979 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10980 #define get_user_ex(x, ptr) do { \
10981 unsigned long __gue_val; \
10982 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10983 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10984 + (x) = (__typeof__(*(ptr)))__gue_val; \
10985 } while (0)
10986
10987 #ifdef CONFIG_X86_WP_WORKS_OK
10988 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10989 index 566e803..b9521e9 100644
10990 --- a/arch/x86/include/asm/uaccess_32.h
10991 +++ b/arch/x86/include/asm/uaccess_32.h
10992 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10993 static __always_inline unsigned long __must_check
10994 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10995 {
10996 + if ((long)n < 0)
10997 + return n;
10998 +
10999 if (__builtin_constant_p(n)) {
11000 unsigned long ret;
11001
11002 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
11003 return ret;
11004 }
11005 }
11006 + if (!__builtin_constant_p(n))
11007 + check_object_size(from, n, true);
11008 return __copy_to_user_ll(to, from, n);
11009 }
11010
11011 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
11012 __copy_to_user(void __user *to, const void *from, unsigned long n)
11013 {
11014 might_fault();
11015 +
11016 return __copy_to_user_inatomic(to, from, n);
11017 }
11018
11019 static __always_inline unsigned long
11020 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
11021 {
11022 + if ((long)n < 0)
11023 + return n;
11024 +
11025 /* Avoid zeroing the tail if the copy fails..
11026 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
11027 * but as the zeroing behaviour is only significant when n is not
11028 @@ -137,6 +146,10 @@ static __always_inline unsigned long
11029 __copy_from_user(void *to, const void __user *from, unsigned long n)
11030 {
11031 might_fault();
11032 +
11033 + if ((long)n < 0)
11034 + return n;
11035 +
11036 if (__builtin_constant_p(n)) {
11037 unsigned long ret;
11038
11039 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
11040 return ret;
11041 }
11042 }
11043 + if (!__builtin_constant_p(n))
11044 + check_object_size(to, n, false);
11045 return __copy_from_user_ll(to, from, n);
11046 }
11047
11048 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
11049 const void __user *from, unsigned long n)
11050 {
11051 might_fault();
11052 +
11053 + if ((long)n < 0)
11054 + return n;
11055 +
11056 if (__builtin_constant_p(n)) {
11057 unsigned long ret;
11058
11059 @@ -181,15 +200,19 @@ static __always_inline unsigned long
11060 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
11061 unsigned long n)
11062 {
11063 - return __copy_from_user_ll_nocache_nozero(to, from, n);
11064 + if ((long)n < 0)
11065 + return n;
11066 +
11067 + return __copy_from_user_ll_nocache_nozero(to, from, n);
11068 }
11069
11070 -unsigned long __must_check copy_to_user(void __user *to,
11071 - const void *from, unsigned long n);
11072 -unsigned long __must_check _copy_from_user(void *to,
11073 - const void __user *from,
11074 - unsigned long n);
11075 -
11076 +extern void copy_to_user_overflow(void)
11077 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11078 + __compiletime_error("copy_to_user() buffer size is not provably correct")
11079 +#else
11080 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
11081 +#endif
11082 +;
11083
11084 extern void copy_from_user_overflow(void)
11085 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11086 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
11087 #endif
11088 ;
11089
11090 -static inline unsigned long __must_check copy_from_user(void *to,
11091 - const void __user *from,
11092 - unsigned long n)
11093 +/**
11094 + * copy_to_user: - Copy a block of data into user space.
11095 + * @to: Destination address, in user space.
11096 + * @from: Source address, in kernel space.
11097 + * @n: Number of bytes to copy.
11098 + *
11099 + * Context: User context only. This function may sleep.
11100 + *
11101 + * Copy data from kernel space to user space.
11102 + *
11103 + * Returns number of bytes that could not be copied.
11104 + * On success, this will be zero.
11105 + */
11106 +static inline unsigned long __must_check
11107 +copy_to_user(void __user *to, const void *from, unsigned long n)
11108 +{
11109 + int sz = __compiletime_object_size(from);
11110 +
11111 + if (unlikely(sz != -1 && sz < n))
11112 + copy_to_user_overflow();
11113 + else if (access_ok(VERIFY_WRITE, to, n))
11114 + n = __copy_to_user(to, from, n);
11115 + return n;
11116 +}
11117 +
11118 +/**
11119 + * copy_from_user: - Copy a block of data from user space.
11120 + * @to: Destination address, in kernel space.
11121 + * @from: Source address, in user space.
11122 + * @n: Number of bytes to copy.
11123 + *
11124 + * Context: User context only. This function may sleep.
11125 + *
11126 + * Copy data from user space to kernel space.
11127 + *
11128 + * Returns number of bytes that could not be copied.
11129 + * On success, this will be zero.
11130 + *
11131 + * If some data could not be copied, this function will pad the copied
11132 + * data to the requested size using zero bytes.
11133 + */
11134 +static inline unsigned long __must_check
11135 +copy_from_user(void *to, const void __user *from, unsigned long n)
11136 {
11137 int sz = __compiletime_object_size(to);
11138
11139 - if (likely(sz == -1 || sz >= n))
11140 - n = _copy_from_user(to, from, n);
11141 - else
11142 + if (unlikely(sz != -1 && sz < n))
11143 copy_from_user_overflow();
11144 -
11145 + else if (access_ok(VERIFY_READ, from, n))
11146 + n = __copy_from_user(to, from, n);
11147 + else if ((long)n > 0) {
11148 + if (!__builtin_constant_p(n))
11149 + check_object_size(to, n, false);
11150 + memset(to, 0, n);
11151 + }
11152 return n;
11153 }
11154
11155 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
11156 index 1c66d30..e66922c 100644
11157 --- a/arch/x86/include/asm/uaccess_64.h
11158 +++ b/arch/x86/include/asm/uaccess_64.h
11159 @@ -10,6 +10,9 @@
11160 #include <asm/alternative.h>
11161 #include <asm/cpufeature.h>
11162 #include <asm/page.h>
11163 +#include <asm/pgtable.h>
11164 +
11165 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
11166
11167 /*
11168 * Copy To/From Userspace
11169 @@ -17,12 +20,12 @@
11170
11171 /* Handles exceptions in both to and from, but doesn't do access_ok */
11172 __must_check unsigned long
11173 -copy_user_generic_string(void *to, const void *from, unsigned len);
11174 +copy_user_generic_string(void *to, const void *from, unsigned long len);
11175 __must_check unsigned long
11176 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
11177 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
11178
11179 static __always_inline __must_check unsigned long
11180 -copy_user_generic(void *to, const void *from, unsigned len)
11181 +copy_user_generic(void *to, const void *from, unsigned long len)
11182 {
11183 unsigned ret;
11184
11185 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
11186 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
11187 "=d" (len)),
11188 "1" (to), "2" (from), "3" (len)
11189 - : "memory", "rcx", "r8", "r9", "r10", "r11");
11190 + : "memory", "rcx", "r8", "r9", "r11");
11191 return ret;
11192 }
11193
11194 +static __always_inline __must_check unsigned long
11195 +__copy_to_user(void __user *to, const void *from, unsigned long len);
11196 +static __always_inline __must_check unsigned long
11197 +__copy_from_user(void *to, const void __user *from, unsigned long len);
11198 __must_check unsigned long
11199 -_copy_to_user(void __user *to, const void *from, unsigned len);
11200 -__must_check unsigned long
11201 -_copy_from_user(void *to, const void __user *from, unsigned len);
11202 -__must_check unsigned long
11203 -copy_in_user(void __user *to, const void __user *from, unsigned len);
11204 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
11205
11206 static inline unsigned long __must_check copy_from_user(void *to,
11207 const void __user *from,
11208 unsigned long n)
11209 {
11210 - int sz = __compiletime_object_size(to);
11211 -
11212 might_fault();
11213 - if (likely(sz == -1 || sz >= n))
11214 - n = _copy_from_user(to, from, n);
11215 -#ifdef CONFIG_DEBUG_VM
11216 - else
11217 - WARN(1, "Buffer overflow detected!\n");
11218 -#endif
11219 +
11220 + if (access_ok(VERIFY_READ, from, n))
11221 + n = __copy_from_user(to, from, n);
11222 + else if (n < INT_MAX) {
11223 + if (!__builtin_constant_p(n))
11224 + check_object_size(to, n, false);
11225 + memset(to, 0, n);
11226 + }
11227 return n;
11228 }
11229
11230 static __always_inline __must_check
11231 -int copy_to_user(void __user *dst, const void *src, unsigned size)
11232 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
11233 {
11234 might_fault();
11235
11236 - return _copy_to_user(dst, src, size);
11237 + if (access_ok(VERIFY_WRITE, dst, size))
11238 + size = __copy_to_user(dst, src, size);
11239 + return size;
11240 }
11241
11242 static __always_inline __must_check
11243 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
11244 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11245 {
11246 - int ret = 0;
11247 + int sz = __compiletime_object_size(dst);
11248 + unsigned ret = 0;
11249
11250 might_fault();
11251 - if (!__builtin_constant_p(size))
11252 - return copy_user_generic(dst, (__force void *)src, size);
11253 +
11254 + if (size > INT_MAX)
11255 + return size;
11256 +
11257 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11258 + if (!__access_ok(VERIFY_READ, src, size))
11259 + return size;
11260 +#endif
11261 +
11262 + if (unlikely(sz != -1 && sz < size)) {
11263 +#ifdef CONFIG_DEBUG_VM
11264 + WARN(1, "Buffer overflow detected!\n");
11265 +#endif
11266 + return size;
11267 + }
11268 +
11269 + if (!__builtin_constant_p(size)) {
11270 + check_object_size(dst, size, false);
11271 +
11272 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11273 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274 + src += PAX_USER_SHADOW_BASE;
11275 +#endif
11276 +
11277 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 + }
11279 switch (size) {
11280 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11281 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11282 ret, "b", "b", "=q", 1);
11283 return ret;
11284 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11285 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11286 ret, "w", "w", "=r", 2);
11287 return ret;
11288 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11289 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11290 ret, "l", "k", "=r", 4);
11291 return ret;
11292 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11293 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11294 ret, "q", "", "=r", 8);
11295 return ret;
11296 case 10:
11297 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11298 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11299 ret, "q", "", "=r", 10);
11300 if (unlikely(ret))
11301 return ret;
11302 __get_user_asm(*(u16 *)(8 + (char *)dst),
11303 - (u16 __user *)(8 + (char __user *)src),
11304 + (const u16 __user *)(8 + (const char __user *)src),
11305 ret, "w", "w", "=r", 2);
11306 return ret;
11307 case 16:
11308 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11309 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11310 ret, "q", "", "=r", 16);
11311 if (unlikely(ret))
11312 return ret;
11313 __get_user_asm(*(u64 *)(8 + (char *)dst),
11314 - (u64 __user *)(8 + (char __user *)src),
11315 + (const u64 __user *)(8 + (const char __user *)src),
11316 ret, "q", "", "=r", 8);
11317 return ret;
11318 default:
11319 - return copy_user_generic(dst, (__force void *)src, size);
11320 +
11321 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11322 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11323 + src += PAX_USER_SHADOW_BASE;
11324 +#endif
11325 +
11326 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11327 }
11328 }
11329
11330 static __always_inline __must_check
11331 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11332 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11333 {
11334 - int ret = 0;
11335 + int sz = __compiletime_object_size(src);
11336 + unsigned ret = 0;
11337
11338 might_fault();
11339 - if (!__builtin_constant_p(size))
11340 - return copy_user_generic((__force void *)dst, src, size);
11341 +
11342 + if (size > INT_MAX)
11343 + return size;
11344 +
11345 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11346 + if (!__access_ok(VERIFY_WRITE, dst, size))
11347 + return size;
11348 +#endif
11349 +
11350 + if (unlikely(sz != -1 && sz < size)) {
11351 +#ifdef CONFIG_DEBUG_VM
11352 + WARN(1, "Buffer overflow detected!\n");
11353 +#endif
11354 + return size;
11355 + }
11356 +
11357 + if (!__builtin_constant_p(size)) {
11358 + check_object_size(src, size, true);
11359 +
11360 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11361 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11362 + dst += PAX_USER_SHADOW_BASE;
11363 +#endif
11364 +
11365 + return copy_user_generic((__force_kernel void *)dst, src, size);
11366 + }
11367 switch (size) {
11368 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11369 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11370 ret, "b", "b", "iq", 1);
11371 return ret;
11372 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11373 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11374 ret, "w", "w", "ir", 2);
11375 return ret;
11376 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11377 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11378 ret, "l", "k", "ir", 4);
11379 return ret;
11380 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11381 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11382 ret, "q", "", "er", 8);
11383 return ret;
11384 case 10:
11385 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11386 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11387 ret, "q", "", "er", 10);
11388 if (unlikely(ret))
11389 return ret;
11390 asm("":::"memory");
11391 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11392 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11393 ret, "w", "w", "ir", 2);
11394 return ret;
11395 case 16:
11396 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11397 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11398 ret, "q", "", "er", 16);
11399 if (unlikely(ret))
11400 return ret;
11401 asm("":::"memory");
11402 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11403 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11404 ret, "q", "", "er", 8);
11405 return ret;
11406 default:
11407 - return copy_user_generic((__force void *)dst, src, size);
11408 +
11409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11410 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11411 + dst += PAX_USER_SHADOW_BASE;
11412 +#endif
11413 +
11414 + return copy_user_generic((__force_kernel void *)dst, src, size);
11415 }
11416 }
11417
11418 static __always_inline __must_check
11419 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11420 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11421 {
11422 - int ret = 0;
11423 + unsigned ret = 0;
11424
11425 might_fault();
11426 - if (!__builtin_constant_p(size))
11427 - return copy_user_generic((__force void *)dst,
11428 - (__force void *)src, size);
11429 +
11430 + if (size > INT_MAX)
11431 + return size;
11432 +
11433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11434 + if (!__access_ok(VERIFY_READ, src, size))
11435 + return size;
11436 + if (!__access_ok(VERIFY_WRITE, dst, size))
11437 + return size;
11438 +#endif
11439 +
11440 + if (!__builtin_constant_p(size)) {
11441 +
11442 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11443 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11444 + src += PAX_USER_SHADOW_BASE;
11445 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11446 + dst += PAX_USER_SHADOW_BASE;
11447 +#endif
11448 +
11449 + return copy_user_generic((__force_kernel void *)dst,
11450 + (__force_kernel const void *)src, size);
11451 + }
11452 switch (size) {
11453 case 1: {
11454 u8 tmp;
11455 - __get_user_asm(tmp, (u8 __user *)src,
11456 + __get_user_asm(tmp, (const u8 __user *)src,
11457 ret, "b", "b", "=q", 1);
11458 if (likely(!ret))
11459 __put_user_asm(tmp, (u8 __user *)dst,
11460 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11461 }
11462 case 2: {
11463 u16 tmp;
11464 - __get_user_asm(tmp, (u16 __user *)src,
11465 + __get_user_asm(tmp, (const u16 __user *)src,
11466 ret, "w", "w", "=r", 2);
11467 if (likely(!ret))
11468 __put_user_asm(tmp, (u16 __user *)dst,
11469 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11470
11471 case 4: {
11472 u32 tmp;
11473 - __get_user_asm(tmp, (u32 __user *)src,
11474 + __get_user_asm(tmp, (const u32 __user *)src,
11475 ret, "l", "k", "=r", 4);
11476 if (likely(!ret))
11477 __put_user_asm(tmp, (u32 __user *)dst,
11478 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11479 }
11480 case 8: {
11481 u64 tmp;
11482 - __get_user_asm(tmp, (u64 __user *)src,
11483 + __get_user_asm(tmp, (const u64 __user *)src,
11484 ret, "q", "", "=r", 8);
11485 if (likely(!ret))
11486 __put_user_asm(tmp, (u64 __user *)dst,
11487 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11488 return ret;
11489 }
11490 default:
11491 - return copy_user_generic((__force void *)dst,
11492 - (__force void *)src, size);
11493 +
11494 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11495 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11496 + src += PAX_USER_SHADOW_BASE;
11497 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11498 + dst += PAX_USER_SHADOW_BASE;
11499 +#endif
11500 +
11501 + return copy_user_generic((__force_kernel void *)dst,
11502 + (__force_kernel const void *)src, size);
11503 }
11504 }
11505
11506 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11507 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11508
11509 static __must_check __always_inline int
11510 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11511 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11512 {
11513 - return copy_user_generic(dst, (__force const void *)src, size);
11514 + if (size > INT_MAX)
11515 + return size;
11516 +
11517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11518 + if (!__access_ok(VERIFY_READ, src, size))
11519 + return size;
11520 +
11521 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11522 + src += PAX_USER_SHADOW_BASE;
11523 +#endif
11524 +
11525 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11526 }
11527
11528 -static __must_check __always_inline int
11529 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11530 +static __must_check __always_inline unsigned long
11531 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11532 {
11533 - return copy_user_generic((__force void *)dst, src, size);
11534 + if (size > INT_MAX)
11535 + return size;
11536 +
11537 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11538 + if (!__access_ok(VERIFY_WRITE, dst, size))
11539 + return size;
11540 +
11541 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11542 + dst += PAX_USER_SHADOW_BASE;
11543 +#endif
11544 +
11545 + return copy_user_generic((__force_kernel void *)dst, src, size);
11546 }
11547
11548 -extern long __copy_user_nocache(void *dst, const void __user *src,
11549 - unsigned size, int zerorest);
11550 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11551 + unsigned long size, int zerorest);
11552
11553 -static inline int
11554 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11555 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11556 {
11557 might_sleep();
11558 +
11559 + if (size > INT_MAX)
11560 + return size;
11561 +
11562 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11563 + if (!__access_ok(VERIFY_READ, src, size))
11564 + return size;
11565 +#endif
11566 +
11567 return __copy_user_nocache(dst, src, size, 1);
11568 }
11569
11570 -static inline int
11571 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11572 - unsigned size)
11573 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11574 + unsigned long size)
11575 {
11576 + if (size > INT_MAX)
11577 + return size;
11578 +
11579 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11580 + if (!__access_ok(VERIFY_READ, src, size))
11581 + return size;
11582 +#endif
11583 +
11584 return __copy_user_nocache(dst, src, size, 0);
11585 }
11586
11587 -unsigned long
11588 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11589 +extern unsigned long
11590 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11591
11592 #endif /* _ASM_X86_UACCESS_64_H */
11593 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11594 index bb05228..d763d5b 100644
11595 --- a/arch/x86/include/asm/vdso.h
11596 +++ b/arch/x86/include/asm/vdso.h
11597 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11598 #define VDSO32_SYMBOL(base, name) \
11599 ({ \
11600 extern const char VDSO32_##name[]; \
11601 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11602 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11603 })
11604 #endif
11605
11606 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11607 index 1971e65..1e3559b 100644
11608 --- a/arch/x86/include/asm/x86_init.h
11609 +++ b/arch/x86/include/asm/x86_init.h
11610 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11611 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11612 void (*find_smp_config)(void);
11613 void (*get_smp_config)(unsigned int early);
11614 -};
11615 +} __no_const;
11616
11617 /**
11618 * struct x86_init_resources - platform specific resource related ops
11619 @@ -42,7 +42,7 @@ struct x86_init_resources {
11620 void (*probe_roms)(void);
11621 void (*reserve_resources)(void);
11622 char *(*memory_setup)(void);
11623 -};
11624 +} __no_const;
11625
11626 /**
11627 * struct x86_init_irqs - platform specific interrupt setup
11628 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11629 void (*pre_vector_init)(void);
11630 void (*intr_init)(void);
11631 void (*trap_init)(void);
11632 -};
11633 +} __no_const;
11634
11635 /**
11636 * struct x86_init_oem - oem platform specific customizing functions
11637 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11638 struct x86_init_oem {
11639 void (*arch_setup)(void);
11640 void (*banner)(void);
11641 -};
11642 +} __no_const;
11643
11644 /**
11645 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11646 @@ -76,7 +76,7 @@ struct x86_init_oem {
11647 */
11648 struct x86_init_mapping {
11649 void (*pagetable_reserve)(u64 start, u64 end);
11650 -};
11651 +} __no_const;
11652
11653 /**
11654 * struct x86_init_paging - platform specific paging functions
11655 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11656 struct x86_init_paging {
11657 void (*pagetable_setup_start)(pgd_t *base);
11658 void (*pagetable_setup_done)(pgd_t *base);
11659 -};
11660 +} __no_const;
11661
11662 /**
11663 * struct x86_init_timers - platform specific timer setup
11664 @@ -101,7 +101,7 @@ struct x86_init_timers {
11665 void (*tsc_pre_init)(void);
11666 void (*timer_init)(void);
11667 void (*wallclock_init)(void);
11668 -};
11669 +} __no_const;
11670
11671 /**
11672 * struct x86_init_iommu - platform specific iommu setup
11673 @@ -109,7 +109,7 @@ struct x86_init_timers {
11674 */
11675 struct x86_init_iommu {
11676 int (*iommu_init)(void);
11677 -};
11678 +} __no_const;
11679
11680 /**
11681 * struct x86_init_pci - platform specific pci init functions
11682 @@ -123,7 +123,7 @@ struct x86_init_pci {
11683 int (*init)(void);
11684 void (*init_irq)(void);
11685 void (*fixup_irqs)(void);
11686 -};
11687 +} __no_const;
11688
11689 /**
11690 * struct x86_init_ops - functions for platform specific setup
11691 @@ -139,7 +139,7 @@ struct x86_init_ops {
11692 struct x86_init_timers timers;
11693 struct x86_init_iommu iommu;
11694 struct x86_init_pci pci;
11695 -};
11696 +} __no_const;
11697
11698 /**
11699 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11700 @@ -147,7 +147,7 @@ struct x86_init_ops {
11701 */
11702 struct x86_cpuinit_ops {
11703 void (*setup_percpu_clockev)(void);
11704 -};
11705 +} __no_const;
11706
11707 /**
11708 * struct x86_platform_ops - platform specific runtime functions
11709 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11710 void (*nmi_init)(void);
11711 unsigned char (*get_nmi_reason)(void);
11712 int (*i8042_detect)(void);
11713 -};
11714 +} __no_const;
11715
11716 struct pci_dev;
11717
11718 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11719 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11720 void (*teardown_msi_irq)(unsigned int irq);
11721 void (*teardown_msi_irqs)(struct pci_dev *dev);
11722 -};
11723 +} __no_const;
11724
11725 extern struct x86_init_ops x86_init;
11726 extern struct x86_cpuinit_ops x86_cpuinit;
11727 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11728 index c6ce245..ffbdab7 100644
11729 --- a/arch/x86/include/asm/xsave.h
11730 +++ b/arch/x86/include/asm/xsave.h
11731 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11732 {
11733 int err;
11734
11735 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11736 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11737 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11738 +#endif
11739 +
11740 /*
11741 * Clear the xsave header first, so that reserved fields are
11742 * initialized to zero.
11743 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11744 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11745 {
11746 int err;
11747 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11748 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11749 u32 lmask = mask;
11750 u32 hmask = mask >> 32;
11751
11752 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11753 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11754 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11755 +#endif
11756 +
11757 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11758 "2:\n"
11759 ".section .fixup,\"ax\"\n"
11760 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11761 index 6a564ac..9b1340c 100644
11762 --- a/arch/x86/kernel/acpi/realmode/Makefile
11763 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11764 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11765 $(call cc-option, -fno-stack-protector) \
11766 $(call cc-option, -mpreferred-stack-boundary=2)
11767 KBUILD_CFLAGS += $(call cc-option, -m32)
11768 +ifdef CONSTIFY_PLUGIN
11769 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11770 +endif
11771 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11772 GCOV_PROFILE := n
11773
11774 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11775 index b4fd836..4358fe3 100644
11776 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11777 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11778 @@ -108,6 +108,9 @@ wakeup_code:
11779 /* Do any other stuff... */
11780
11781 #ifndef CONFIG_64BIT
11782 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11783 + call verify_cpu
11784 +
11785 /* This could also be done in C code... */
11786 movl pmode_cr3, %eax
11787 movl %eax, %cr3
11788 @@ -131,6 +134,7 @@ wakeup_code:
11789 movl pmode_cr0, %eax
11790 movl %eax, %cr0
11791 jmp pmode_return
11792 +# include "../../verify_cpu.S"
11793 #else
11794 pushw $0
11795 pushw trampoline_segment
11796 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11797 index 103b6ab..2004d0a 100644
11798 --- a/arch/x86/kernel/acpi/sleep.c
11799 +++ b/arch/x86/kernel/acpi/sleep.c
11800 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11801 header->trampoline_segment = trampoline_address() >> 4;
11802 #ifdef CONFIG_SMP
11803 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11804 +
11805 + pax_open_kernel();
11806 early_gdt_descr.address =
11807 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11808 + pax_close_kernel();
11809 +
11810 initial_gs = per_cpu_offset(smp_processor_id());
11811 #endif
11812 initial_code = (unsigned long)wakeup_long64;
11813 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11814 index 13ab720..95d5442 100644
11815 --- a/arch/x86/kernel/acpi/wakeup_32.S
11816 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11817 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11818 # and restore the stack ... but you need gdt for this to work
11819 movl saved_context_esp, %esp
11820
11821 - movl %cs:saved_magic, %eax
11822 - cmpl $0x12345678, %eax
11823 + cmpl $0x12345678, saved_magic
11824 jne bogus_magic
11825
11826 # jump to place where we left off
11827 - movl saved_eip, %eax
11828 - jmp *%eax
11829 + jmp *(saved_eip)
11830
11831 bogus_magic:
11832 jmp bogus_magic
11833 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11834 index 1f84794..e23f862 100644
11835 --- a/arch/x86/kernel/alternative.c
11836 +++ b/arch/x86/kernel/alternative.c
11837 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11838 */
11839 for (a = start; a < end; a++) {
11840 instr = (u8 *)&a->instr_offset + a->instr_offset;
11841 +
11842 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11843 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11844 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11845 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11846 +#endif
11847 +
11848 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11849 BUG_ON(a->replacementlen > a->instrlen);
11850 BUG_ON(a->instrlen > sizeof(insnbuf));
11851 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11852 for (poff = start; poff < end; poff++) {
11853 u8 *ptr = (u8 *)poff + *poff;
11854
11855 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11856 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11857 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11858 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11859 +#endif
11860 +
11861 if (!*poff || ptr < text || ptr >= text_end)
11862 continue;
11863 /* turn DS segment override prefix into lock prefix */
11864 - if (*ptr == 0x3e)
11865 + if (*ktla_ktva(ptr) == 0x3e)
11866 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11867 };
11868 mutex_unlock(&text_mutex);
11869 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11870 for (poff = start; poff < end; poff++) {
11871 u8 *ptr = (u8 *)poff + *poff;
11872
11873 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11874 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11875 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11876 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11877 +#endif
11878 +
11879 if (!*poff || ptr < text || ptr >= text_end)
11880 continue;
11881 /* turn lock prefix into DS segment override prefix */
11882 - if (*ptr == 0xf0)
11883 + if (*ktla_ktva(ptr) == 0xf0)
11884 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11885 };
11886 mutex_unlock(&text_mutex);
11887 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11888
11889 BUG_ON(p->len > MAX_PATCH_LEN);
11890 /* prep the buffer with the original instructions */
11891 - memcpy(insnbuf, p->instr, p->len);
11892 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11893 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11894 (unsigned long)p->instr, p->len);
11895
11896 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11897 if (smp_alt_once)
11898 free_init_pages("SMP alternatives",
11899 (unsigned long)__smp_locks,
11900 - (unsigned long)__smp_locks_end);
11901 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11902
11903 restart_nmi();
11904 }
11905 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11906 * instructions. And on the local CPU you need to be protected again NMI or MCE
11907 * handlers seeing an inconsistent instruction while you patch.
11908 */
11909 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11910 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11911 size_t len)
11912 {
11913 unsigned long flags;
11914 local_irq_save(flags);
11915 - memcpy(addr, opcode, len);
11916 +
11917 + pax_open_kernel();
11918 + memcpy(ktla_ktva(addr), opcode, len);
11919 sync_core();
11920 + pax_close_kernel();
11921 +
11922 local_irq_restore(flags);
11923 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11924 that causes hangs on some VIA CPUs. */
11925 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11926 */
11927 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11928 {
11929 - unsigned long flags;
11930 - char *vaddr;
11931 + unsigned char *vaddr = ktla_ktva(addr);
11932 struct page *pages[2];
11933 - int i;
11934 + size_t i;
11935
11936 if (!core_kernel_text((unsigned long)addr)) {
11937 - pages[0] = vmalloc_to_page(addr);
11938 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11939 + pages[0] = vmalloc_to_page(vaddr);
11940 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11941 } else {
11942 - pages[0] = virt_to_page(addr);
11943 + pages[0] = virt_to_page(vaddr);
11944 WARN_ON(!PageReserved(pages[0]));
11945 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11946 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11947 }
11948 BUG_ON(!pages[0]);
11949 - local_irq_save(flags);
11950 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11951 - if (pages[1])
11952 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11953 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11954 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11955 - clear_fixmap(FIX_TEXT_POKE0);
11956 - if (pages[1])
11957 - clear_fixmap(FIX_TEXT_POKE1);
11958 - local_flush_tlb();
11959 - sync_core();
11960 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11961 - that causes hangs on some VIA CPUs. */
11962 + text_poke_early(addr, opcode, len);
11963 for (i = 0; i < len; i++)
11964 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11965 - local_irq_restore(flags);
11966 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11967 return addr;
11968 }
11969
11970 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11971 index f98d84c..e402a69 100644
11972 --- a/arch/x86/kernel/apic/apic.c
11973 +++ b/arch/x86/kernel/apic/apic.c
11974 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11975 /*
11976 * Debug level, exported for io_apic.c
11977 */
11978 -unsigned int apic_verbosity;
11979 +int apic_verbosity;
11980
11981 int pic_mode;
11982
11983 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11984 apic_write(APIC_ESR, 0);
11985 v1 = apic_read(APIC_ESR);
11986 ack_APIC_irq();
11987 - atomic_inc(&irq_err_count);
11988 + atomic_inc_unchecked(&irq_err_count);
11989
11990 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11991 smp_processor_id(), v0 , v1);
11992 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11993 index 6d939d7..0697fcc 100644
11994 --- a/arch/x86/kernel/apic/io_apic.c
11995 +++ b/arch/x86/kernel/apic/io_apic.c
11996 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11997 }
11998 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11999
12000 -void lock_vector_lock(void)
12001 +void lock_vector_lock(void) __acquires(vector_lock)
12002 {
12003 /* Used to the online set of cpus does not change
12004 * during assign_irq_vector.
12005 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
12006 raw_spin_lock(&vector_lock);
12007 }
12008
12009 -void unlock_vector_lock(void)
12010 +void unlock_vector_lock(void) __releases(vector_lock)
12011 {
12012 raw_spin_unlock(&vector_lock);
12013 }
12014 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
12015 ack_APIC_irq();
12016 }
12017
12018 -atomic_t irq_mis_count;
12019 +atomic_unchecked_t irq_mis_count;
12020
12021 static void ack_apic_level(struct irq_data *data)
12022 {
12023 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
12024 * at the cpu.
12025 */
12026 if (!(v & (1 << (i & 0x1f)))) {
12027 - atomic_inc(&irq_mis_count);
12028 + atomic_inc_unchecked(&irq_mis_count);
12029
12030 eoi_ioapic_irq(irq, cfg);
12031 }
12032 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
12033 index a46bd38..6b906d7 100644
12034 --- a/arch/x86/kernel/apm_32.c
12035 +++ b/arch/x86/kernel/apm_32.c
12036 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
12037 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12038 * even though they are called in protected mode.
12039 */
12040 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12041 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12042 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12043
12044 static const char driver_version[] = "1.16ac"; /* no spaces */
12045 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
12046 BUG_ON(cpu != 0);
12047 gdt = get_cpu_gdt_table(cpu);
12048 save_desc_40 = gdt[0x40 / 8];
12049 +
12050 + pax_open_kernel();
12051 gdt[0x40 / 8] = bad_bios_desc;
12052 + pax_close_kernel();
12053
12054 apm_irq_save(flags);
12055 APM_DO_SAVE_SEGS;
12056 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
12057 &call->esi);
12058 APM_DO_RESTORE_SEGS;
12059 apm_irq_restore(flags);
12060 +
12061 + pax_open_kernel();
12062 gdt[0x40 / 8] = save_desc_40;
12063 + pax_close_kernel();
12064 +
12065 put_cpu();
12066
12067 return call->eax & 0xff;
12068 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
12069 BUG_ON(cpu != 0);
12070 gdt = get_cpu_gdt_table(cpu);
12071 save_desc_40 = gdt[0x40 / 8];
12072 +
12073 + pax_open_kernel();
12074 gdt[0x40 / 8] = bad_bios_desc;
12075 + pax_close_kernel();
12076
12077 apm_irq_save(flags);
12078 APM_DO_SAVE_SEGS;
12079 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
12080 &call->eax);
12081 APM_DO_RESTORE_SEGS;
12082 apm_irq_restore(flags);
12083 +
12084 + pax_open_kernel();
12085 gdt[0x40 / 8] = save_desc_40;
12086 + pax_close_kernel();
12087 +
12088 put_cpu();
12089 return error;
12090 }
12091 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
12092 * code to that CPU.
12093 */
12094 gdt = get_cpu_gdt_table(0);
12095 +
12096 + pax_open_kernel();
12097 set_desc_base(&gdt[APM_CS >> 3],
12098 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12099 set_desc_base(&gdt[APM_CS_16 >> 3],
12100 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12101 set_desc_base(&gdt[APM_DS >> 3],
12102 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12103 + pax_close_kernel();
12104
12105 proc_create("apm", 0, NULL, &apm_file_ops);
12106
12107 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
12108 index 4f13faf..87db5d2 100644
12109 --- a/arch/x86/kernel/asm-offsets.c
12110 +++ b/arch/x86/kernel/asm-offsets.c
12111 @@ -33,6 +33,8 @@ void common(void) {
12112 OFFSET(TI_status, thread_info, status);
12113 OFFSET(TI_addr_limit, thread_info, addr_limit);
12114 OFFSET(TI_preempt_count, thread_info, preempt_count);
12115 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12116 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12117
12118 BLANK();
12119 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
12120 @@ -53,8 +55,26 @@ void common(void) {
12121 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12122 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12123 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12124 +
12125 +#ifdef CONFIG_PAX_KERNEXEC
12126 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12127 #endif
12128
12129 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12130 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12131 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12132 +#ifdef CONFIG_X86_64
12133 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12134 +#endif
12135 +#endif
12136 +
12137 +#endif
12138 +
12139 + BLANK();
12140 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12141 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12142 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12143 +
12144 #ifdef CONFIG_XEN
12145 BLANK();
12146 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12147 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
12148 index e72a119..6e2955d 100644
12149 --- a/arch/x86/kernel/asm-offsets_64.c
12150 +++ b/arch/x86/kernel/asm-offsets_64.c
12151 @@ -69,6 +69,7 @@ int main(void)
12152 BLANK();
12153 #undef ENTRY
12154
12155 + DEFINE(TSS_size, sizeof(struct tss_struct));
12156 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
12157 BLANK();
12158
12159 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
12160 index 25f24dc..4094a7f 100644
12161 --- a/arch/x86/kernel/cpu/Makefile
12162 +++ b/arch/x86/kernel/cpu/Makefile
12163 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
12164 CFLAGS_REMOVE_perf_event.o = -pg
12165 endif
12166
12167 -# Make sure load_percpu_segment has no stackprotector
12168 -nostackp := $(call cc-option, -fno-stack-protector)
12169 -CFLAGS_common.o := $(nostackp)
12170 -
12171 obj-y := intel_cacheinfo.o scattered.o topology.o
12172 obj-y += proc.o capflags.o powerflags.o common.o
12173 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
12174 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
12175 index 0bab2b1..d0a1bf8 100644
12176 --- a/arch/x86/kernel/cpu/amd.c
12177 +++ b/arch/x86/kernel/cpu/amd.c
12178 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
12179 unsigned int size)
12180 {
12181 /* AMD errata T13 (order #21922) */
12182 - if ((c->x86 == 6)) {
12183 + if (c->x86 == 6) {
12184 /* Duron Rev A0 */
12185 if (c->x86_model == 3 && c->x86_mask == 0)
12186 size = 64;
12187 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
12188 index aa003b1..47ea638 100644
12189 --- a/arch/x86/kernel/cpu/common.c
12190 +++ b/arch/x86/kernel/cpu/common.c
12191 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
12192
12193 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12194
12195 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12196 -#ifdef CONFIG_X86_64
12197 - /*
12198 - * We need valid kernel segments for data and code in long mode too
12199 - * IRET will check the segment types kkeil 2000/10/28
12200 - * Also sysret mandates a special GDT layout
12201 - *
12202 - * TLS descriptors are currently at a different place compared to i386.
12203 - * Hopefully nobody expects them at a fixed place (Wine?)
12204 - */
12205 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12206 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12207 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12208 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12209 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12210 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12211 -#else
12212 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12213 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12214 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12215 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12216 - /*
12217 - * Segments used for calling PnP BIOS have byte granularity.
12218 - * They code segments and data segments have fixed 64k limits,
12219 - * the transfer segment sizes are set at run time.
12220 - */
12221 - /* 32-bit code */
12222 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12223 - /* 16-bit code */
12224 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12225 - /* 16-bit data */
12226 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12227 - /* 16-bit data */
12228 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12229 - /* 16-bit data */
12230 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12231 - /*
12232 - * The APM segments have byte granularity and their bases
12233 - * are set at run time. All have 64k limits.
12234 - */
12235 - /* 32-bit code */
12236 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12237 - /* 16-bit code */
12238 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12239 - /* data */
12240 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12241 -
12242 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12243 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12244 - GDT_STACK_CANARY_INIT
12245 -#endif
12246 -} };
12247 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12248 -
12249 static int __init x86_xsave_setup(char *s)
12250 {
12251 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12252 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12253 {
12254 struct desc_ptr gdt_descr;
12255
12256 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12257 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12258 gdt_descr.size = GDT_SIZE - 1;
12259 load_gdt(&gdt_descr);
12260 /* Reload the per-cpu base */
12261 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12262 /* Filter out anything that depends on CPUID levels we don't have */
12263 filter_cpuid_features(c, true);
12264
12265 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12266 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12267 +#endif
12268 +
12269 /* If the model name is still unset, do table lookup. */
12270 if (!c->x86_model_id[0]) {
12271 const char *p;
12272 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12273 }
12274 __setup("clearcpuid=", setup_disablecpuid);
12275
12276 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12277 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12278 +
12279 #ifdef CONFIG_X86_64
12280 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12281
12282 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12283 EXPORT_PER_CPU_SYMBOL(current_task);
12284
12285 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12286 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12287 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12288 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12289
12290 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12291 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12292 {
12293 memset(regs, 0, sizeof(struct pt_regs));
12294 regs->fs = __KERNEL_PERCPU;
12295 - regs->gs = __KERNEL_STACK_CANARY;
12296 + savesegment(gs, regs->gs);
12297
12298 return regs;
12299 }
12300 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12301 int i;
12302
12303 cpu = stack_smp_processor_id();
12304 - t = &per_cpu(init_tss, cpu);
12305 + t = init_tss + cpu;
12306 oist = &per_cpu(orig_ist, cpu);
12307
12308 #ifdef CONFIG_NUMA
12309 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12310 switch_to_new_gdt(cpu);
12311 loadsegment(fs, 0);
12312
12313 - load_idt((const struct desc_ptr *)&idt_descr);
12314 + load_idt(&idt_descr);
12315
12316 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12317 syscall_init();
12318 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12319 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12320 barrier();
12321
12322 - x86_configure_nx();
12323 if (cpu != 0)
12324 enable_x2apic();
12325
12326 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12327 {
12328 int cpu = smp_processor_id();
12329 struct task_struct *curr = current;
12330 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12331 + struct tss_struct *t = init_tss + cpu;
12332 struct thread_struct *thread = &curr->thread;
12333
12334 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12335 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12336 index 5231312..a78a987 100644
12337 --- a/arch/x86/kernel/cpu/intel.c
12338 +++ b/arch/x86/kernel/cpu/intel.c
12339 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12340 * Update the IDT descriptor and reload the IDT so that
12341 * it uses the read-only mapped virtual address.
12342 */
12343 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12344 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12345 load_idt(&idt_descr);
12346 }
12347 #endif
12348 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12349 index 2af127d..8ff7ac0 100644
12350 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12351 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12352 @@ -42,6 +42,7 @@
12353 #include <asm/processor.h>
12354 #include <asm/mce.h>
12355 #include <asm/msr.h>
12356 +#include <asm/local.h>
12357
12358 #include "mce-internal.h"
12359
12360 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12361 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12362 m->cs, m->ip);
12363
12364 - if (m->cs == __KERNEL_CS)
12365 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12366 print_symbol("{%s}", m->ip);
12367 pr_cont("\n");
12368 }
12369 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12370
12371 #define PANIC_TIMEOUT 5 /* 5 seconds */
12372
12373 -static atomic_t mce_paniced;
12374 +static atomic_unchecked_t mce_paniced;
12375
12376 static int fake_panic;
12377 -static atomic_t mce_fake_paniced;
12378 +static atomic_unchecked_t mce_fake_paniced;
12379
12380 /* Panic in progress. Enable interrupts and wait for final IPI */
12381 static void wait_for_panic(void)
12382 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12383 /*
12384 * Make sure only one CPU runs in machine check panic
12385 */
12386 - if (atomic_inc_return(&mce_paniced) > 1)
12387 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12388 wait_for_panic();
12389 barrier();
12390
12391 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12392 console_verbose();
12393 } else {
12394 /* Don't log too much for fake panic */
12395 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12396 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12397 return;
12398 }
12399 /* First print corrected ones that are still unlogged */
12400 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12401 * might have been modified by someone else.
12402 */
12403 rmb();
12404 - if (atomic_read(&mce_paniced))
12405 + if (atomic_read_unchecked(&mce_paniced))
12406 wait_for_panic();
12407 if (!monarch_timeout)
12408 goto out;
12409 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12410 }
12411
12412 /* Call the installed machine check handler for this CPU setup. */
12413 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12414 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12415 unexpected_machine_check;
12416
12417 /*
12418 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12419 return;
12420 }
12421
12422 + pax_open_kernel();
12423 machine_check_vector = do_machine_check;
12424 + pax_close_kernel();
12425
12426 __mcheck_cpu_init_generic();
12427 __mcheck_cpu_init_vendor(c);
12428 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12429 */
12430
12431 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12432 -static int mce_chrdev_open_count; /* #times opened */
12433 +static local_t mce_chrdev_open_count; /* #times opened */
12434 static int mce_chrdev_open_exclu; /* already open exclusive? */
12435
12436 static int mce_chrdev_open(struct inode *inode, struct file *file)
12437 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12438 spin_lock(&mce_chrdev_state_lock);
12439
12440 if (mce_chrdev_open_exclu ||
12441 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12442 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12443 spin_unlock(&mce_chrdev_state_lock);
12444
12445 return -EBUSY;
12446 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12447
12448 if (file->f_flags & O_EXCL)
12449 mce_chrdev_open_exclu = 1;
12450 - mce_chrdev_open_count++;
12451 + local_inc(&mce_chrdev_open_count);
12452
12453 spin_unlock(&mce_chrdev_state_lock);
12454
12455 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12456 {
12457 spin_lock(&mce_chrdev_state_lock);
12458
12459 - mce_chrdev_open_count--;
12460 + local_dec(&mce_chrdev_open_count);
12461 mce_chrdev_open_exclu = 0;
12462
12463 spin_unlock(&mce_chrdev_state_lock);
12464 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12465 static void mce_reset(void)
12466 {
12467 cpu_missing = 0;
12468 - atomic_set(&mce_fake_paniced, 0);
12469 + atomic_set_unchecked(&mce_fake_paniced, 0);
12470 atomic_set(&mce_executing, 0);
12471 atomic_set(&mce_callin, 0);
12472 atomic_set(&global_nwo, 0);
12473 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12474 index 5c0e653..0882b0a 100644
12475 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12476 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12477 @@ -12,6 +12,7 @@
12478 #include <asm/system.h>
12479 #include <asm/mce.h>
12480 #include <asm/msr.h>
12481 +#include <asm/pgtable.h>
12482
12483 /* By default disabled */
12484 int mce_p5_enabled __read_mostly;
12485 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12486 if (!cpu_has(c, X86_FEATURE_MCE))
12487 return;
12488
12489 + pax_open_kernel();
12490 machine_check_vector = pentium_machine_check;
12491 + pax_close_kernel();
12492 /* Make sure the vector pointer is visible before we enable MCEs: */
12493 wmb();
12494
12495 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12496 index 54060f5..c1a7577 100644
12497 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12498 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12499 @@ -11,6 +11,7 @@
12500 #include <asm/system.h>
12501 #include <asm/mce.h>
12502 #include <asm/msr.h>
12503 +#include <asm/pgtable.h>
12504
12505 /* Machine check handler for WinChip C6: */
12506 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12507 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12508 {
12509 u32 lo, hi;
12510
12511 + pax_open_kernel();
12512 machine_check_vector = winchip_machine_check;
12513 + pax_close_kernel();
12514 /* Make sure the vector pointer is visible before we enable MCEs: */
12515 wmb();
12516
12517 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12518 index 6b96110..0da73eb 100644
12519 --- a/arch/x86/kernel/cpu/mtrr/main.c
12520 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12521 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12522 u64 size_or_mask, size_and_mask;
12523 static bool mtrr_aps_delayed_init;
12524
12525 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12526 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12527
12528 const struct mtrr_ops *mtrr_if;
12529
12530 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12531 index df5e41f..816c719 100644
12532 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12533 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12534 @@ -25,7 +25,7 @@ struct mtrr_ops {
12535 int (*validate_add_page)(unsigned long base, unsigned long size,
12536 unsigned int type);
12537 int (*have_wrcomb)(void);
12538 -};
12539 +} __do_const;
12540
12541 extern int generic_get_free_region(unsigned long base, unsigned long size,
12542 int replace_reg);
12543 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12544 index 2bda212..78cc605 100644
12545 --- a/arch/x86/kernel/cpu/perf_event.c
12546 +++ b/arch/x86/kernel/cpu/perf_event.c
12547 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12548 break;
12549
12550 perf_callchain_store(entry, frame.return_address);
12551 - fp = frame.next_frame;
12552 + fp = (const void __force_user *)frame.next_frame;
12553 }
12554 }
12555
12556 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12557 index 13ad899..f642b9a 100644
12558 --- a/arch/x86/kernel/crash.c
12559 +++ b/arch/x86/kernel/crash.c
12560 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12561 {
12562 #ifdef CONFIG_X86_32
12563 struct pt_regs fixed_regs;
12564 -#endif
12565
12566 -#ifdef CONFIG_X86_32
12567 - if (!user_mode_vm(regs)) {
12568 + if (!user_mode(regs)) {
12569 crash_fixup_ss_esp(&fixed_regs, regs);
12570 regs = &fixed_regs;
12571 }
12572 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12573 index 37250fe..bf2ec74 100644
12574 --- a/arch/x86/kernel/doublefault_32.c
12575 +++ b/arch/x86/kernel/doublefault_32.c
12576 @@ -11,7 +11,7 @@
12577
12578 #define DOUBLEFAULT_STACKSIZE (1024)
12579 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12580 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12581 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12582
12583 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12584
12585 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12586 unsigned long gdt, tss;
12587
12588 store_gdt(&gdt_desc);
12589 - gdt = gdt_desc.address;
12590 + gdt = (unsigned long)gdt_desc.address;
12591
12592 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12593
12594 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12595 /* 0x2 bit is always set */
12596 .flags = X86_EFLAGS_SF | 0x2,
12597 .sp = STACK_START,
12598 - .es = __USER_DS,
12599 + .es = __KERNEL_DS,
12600 .cs = __KERNEL_CS,
12601 .ss = __KERNEL_DS,
12602 - .ds = __USER_DS,
12603 + .ds = __KERNEL_DS,
12604 .fs = __KERNEL_PERCPU,
12605
12606 .__cr3 = __pa_nodebug(swapper_pg_dir),
12607 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12608 index 1aae78f..aab3a3d 100644
12609 --- a/arch/x86/kernel/dumpstack.c
12610 +++ b/arch/x86/kernel/dumpstack.c
12611 @@ -2,6 +2,9 @@
12612 * Copyright (C) 1991, 1992 Linus Torvalds
12613 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12614 */
12615 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12616 +#define __INCLUDED_BY_HIDESYM 1
12617 +#endif
12618 #include <linux/kallsyms.h>
12619 #include <linux/kprobes.h>
12620 #include <linux/uaccess.h>
12621 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12622 static void
12623 print_ftrace_graph_addr(unsigned long addr, void *data,
12624 const struct stacktrace_ops *ops,
12625 - struct thread_info *tinfo, int *graph)
12626 + struct task_struct *task, int *graph)
12627 {
12628 - struct task_struct *task = tinfo->task;
12629 unsigned long ret_addr;
12630 int index = task->curr_ret_stack;
12631
12632 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12633 static inline void
12634 print_ftrace_graph_addr(unsigned long addr, void *data,
12635 const struct stacktrace_ops *ops,
12636 - struct thread_info *tinfo, int *graph)
12637 + struct task_struct *task, int *graph)
12638 { }
12639 #endif
12640
12641 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12642 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12643 */
12644
12645 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12646 - void *p, unsigned int size, void *end)
12647 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12648 {
12649 - void *t = tinfo;
12650 if (end) {
12651 if (p < end && p >= (end-THREAD_SIZE))
12652 return 1;
12653 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12654 }
12655
12656 unsigned long
12657 -print_context_stack(struct thread_info *tinfo,
12658 +print_context_stack(struct task_struct *task, void *stack_start,
12659 unsigned long *stack, unsigned long bp,
12660 const struct stacktrace_ops *ops, void *data,
12661 unsigned long *end, int *graph)
12662 {
12663 struct stack_frame *frame = (struct stack_frame *)bp;
12664
12665 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12666 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12667 unsigned long addr;
12668
12669 addr = *stack;
12670 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12671 } else {
12672 ops->address(data, addr, 0);
12673 }
12674 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12675 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12676 }
12677 stack++;
12678 }
12679 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12680 EXPORT_SYMBOL_GPL(print_context_stack);
12681
12682 unsigned long
12683 -print_context_stack_bp(struct thread_info *tinfo,
12684 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12685 unsigned long *stack, unsigned long bp,
12686 const struct stacktrace_ops *ops, void *data,
12687 unsigned long *end, int *graph)
12688 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12689 struct stack_frame *frame = (struct stack_frame *)bp;
12690 unsigned long *ret_addr = &frame->return_address;
12691
12692 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12693 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12694 unsigned long addr = *ret_addr;
12695
12696 if (!__kernel_text_address(addr))
12697 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12698 ops->address(data, addr, 1);
12699 frame = frame->next_frame;
12700 ret_addr = &frame->return_address;
12701 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12702 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12703 }
12704
12705 return (unsigned long)frame;
12706 @@ -186,7 +186,7 @@ void dump_stack(void)
12707
12708 bp = stack_frame(current, NULL);
12709 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12710 - current->pid, current->comm, print_tainted(),
12711 + task_pid_nr(current), current->comm, print_tainted(),
12712 init_utsname()->release,
12713 (int)strcspn(init_utsname()->version, " "),
12714 init_utsname()->version);
12715 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12716 }
12717 EXPORT_SYMBOL_GPL(oops_begin);
12718
12719 +extern void gr_handle_kernel_exploit(void);
12720 +
12721 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12722 {
12723 if (regs && kexec_should_crash(current))
12724 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12725 panic("Fatal exception in interrupt");
12726 if (panic_on_oops)
12727 panic("Fatal exception");
12728 - do_exit(signr);
12729 +
12730 + gr_handle_kernel_exploit();
12731 +
12732 + do_group_exit(signr);
12733 }
12734
12735 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12736 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12737
12738 show_registers(regs);
12739 #ifdef CONFIG_X86_32
12740 - if (user_mode_vm(regs)) {
12741 + if (user_mode(regs)) {
12742 sp = regs->sp;
12743 ss = regs->ss & 0xffff;
12744 } else {
12745 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12746 unsigned long flags = oops_begin();
12747 int sig = SIGSEGV;
12748
12749 - if (!user_mode_vm(regs))
12750 + if (!user_mode(regs))
12751 report_bug(regs->ip, regs);
12752
12753 if (__die(str, regs, err))
12754 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12755 index c99f9ed..2a15d80 100644
12756 --- a/arch/x86/kernel/dumpstack_32.c
12757 +++ b/arch/x86/kernel/dumpstack_32.c
12758 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12759 bp = stack_frame(task, regs);
12760
12761 for (;;) {
12762 - struct thread_info *context;
12763 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12764
12765 - context = (struct thread_info *)
12766 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12767 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12768 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12769
12770 - stack = (unsigned long *)context->previous_esp;
12771 - if (!stack)
12772 + if (stack_start == task_stack_page(task))
12773 break;
12774 + stack = *(unsigned long **)stack_start;
12775 if (ops->stack(data, "IRQ") < 0)
12776 break;
12777 touch_nmi_watchdog();
12778 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12779 * When in-kernel, we also print out the stack and code at the
12780 * time of the fault..
12781 */
12782 - if (!user_mode_vm(regs)) {
12783 + if (!user_mode(regs)) {
12784 unsigned int code_prologue = code_bytes * 43 / 64;
12785 unsigned int code_len = code_bytes;
12786 unsigned char c;
12787 u8 *ip;
12788 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12789
12790 printk(KERN_EMERG "Stack:\n");
12791 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12792
12793 printk(KERN_EMERG "Code: ");
12794
12795 - ip = (u8 *)regs->ip - code_prologue;
12796 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12797 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12798 /* try starting at IP */
12799 - ip = (u8 *)regs->ip;
12800 + ip = (u8 *)regs->ip + cs_base;
12801 code_len = code_len - code_prologue + 1;
12802 }
12803 for (i = 0; i < code_len; i++, ip++) {
12804 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12805 printk(KERN_CONT " Bad EIP value.");
12806 break;
12807 }
12808 - if (ip == (u8 *)regs->ip)
12809 + if (ip == (u8 *)regs->ip + cs_base)
12810 printk(KERN_CONT "<%02x> ", c);
12811 else
12812 printk(KERN_CONT "%02x ", c);
12813 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12814 {
12815 unsigned short ud2;
12816
12817 + ip = ktla_ktva(ip);
12818 if (ip < PAGE_OFFSET)
12819 return 0;
12820 if (probe_kernel_address((unsigned short *)ip, ud2))
12821 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12822
12823 return ud2 == 0x0b0f;
12824 }
12825 +
12826 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12827 +void pax_check_alloca(unsigned long size)
12828 +{
12829 + unsigned long sp = (unsigned long)&sp, stack_left;
12830 +
12831 + /* all kernel stacks are of the same size */
12832 + stack_left = sp & (THREAD_SIZE - 1);
12833 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12834 +}
12835 +EXPORT_SYMBOL(pax_check_alloca);
12836 +#endif
12837 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12838 index 6d728d9..279514e 100644
12839 --- a/arch/x86/kernel/dumpstack_64.c
12840 +++ b/arch/x86/kernel/dumpstack_64.c
12841 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12842 unsigned long *irq_stack_end =
12843 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12844 unsigned used = 0;
12845 - struct thread_info *tinfo;
12846 int graph = 0;
12847 unsigned long dummy;
12848 + void *stack_start;
12849
12850 if (!task)
12851 task = current;
12852 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12853 * current stack address. If the stacks consist of nested
12854 * exceptions
12855 */
12856 - tinfo = task_thread_info(task);
12857 for (;;) {
12858 char *id;
12859 unsigned long *estack_end;
12860 +
12861 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12862 &used, &id);
12863
12864 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12865 if (ops->stack(data, id) < 0)
12866 break;
12867
12868 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12869 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12870 data, estack_end, &graph);
12871 ops->stack(data, "<EOE>");
12872 /*
12873 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12874 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12875 if (ops->stack(data, "IRQ") < 0)
12876 break;
12877 - bp = ops->walk_stack(tinfo, stack, bp,
12878 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12879 ops, data, irq_stack_end, &graph);
12880 /*
12881 * We link to the next stack (which would be
12882 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12883 /*
12884 * This handles the process stack:
12885 */
12886 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12887 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12888 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12889 put_cpu();
12890 }
12891 EXPORT_SYMBOL(dump_trace);
12892 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12893
12894 return ud2 == 0x0b0f;
12895 }
12896 +
12897 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12898 +void pax_check_alloca(unsigned long size)
12899 +{
12900 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12901 + unsigned cpu, used;
12902 + char *id;
12903 +
12904 + /* check the process stack first */
12905 + stack_start = (unsigned long)task_stack_page(current);
12906 + stack_end = stack_start + THREAD_SIZE;
12907 + if (likely(stack_start <= sp && sp < stack_end)) {
12908 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12909 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12910 + return;
12911 + }
12912 +
12913 + cpu = get_cpu();
12914 +
12915 + /* check the irq stacks */
12916 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12917 + stack_start = stack_end - IRQ_STACK_SIZE;
12918 + if (stack_start <= sp && sp < stack_end) {
12919 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12920 + put_cpu();
12921 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12922 + return;
12923 + }
12924 +
12925 + /* check the exception stacks */
12926 + used = 0;
12927 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12928 + stack_start = stack_end - EXCEPTION_STKSZ;
12929 + if (stack_end && stack_start <= sp && sp < stack_end) {
12930 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12931 + put_cpu();
12932 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12933 + return;
12934 + }
12935 +
12936 + put_cpu();
12937 +
12938 + /* unknown stack */
12939 + BUG();
12940 +}
12941 +EXPORT_SYMBOL(pax_check_alloca);
12942 +#endif
12943 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12944 index cd28a35..c72ed9a 100644
12945 --- a/arch/x86/kernel/early_printk.c
12946 +++ b/arch/x86/kernel/early_printk.c
12947 @@ -7,6 +7,7 @@
12948 #include <linux/pci_regs.h>
12949 #include <linux/pci_ids.h>
12950 #include <linux/errno.h>
12951 +#include <linux/sched.h>
12952 #include <asm/io.h>
12953 #include <asm/processor.h>
12954 #include <asm/fcntl.h>
12955 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12956 index f3f6f53..0841b66 100644
12957 --- a/arch/x86/kernel/entry_32.S
12958 +++ b/arch/x86/kernel/entry_32.S
12959 @@ -186,13 +186,146 @@
12960 /*CFI_REL_OFFSET gs, PT_GS*/
12961 .endm
12962 .macro SET_KERNEL_GS reg
12963 +
12964 +#ifdef CONFIG_CC_STACKPROTECTOR
12965 movl $(__KERNEL_STACK_CANARY), \reg
12966 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12967 + movl $(__USER_DS), \reg
12968 +#else
12969 + xorl \reg, \reg
12970 +#endif
12971 +
12972 movl \reg, %gs
12973 .endm
12974
12975 #endif /* CONFIG_X86_32_LAZY_GS */
12976
12977 -.macro SAVE_ALL
12978 +.macro pax_enter_kernel
12979 +#ifdef CONFIG_PAX_KERNEXEC
12980 + call pax_enter_kernel
12981 +#endif
12982 +.endm
12983 +
12984 +.macro pax_exit_kernel
12985 +#ifdef CONFIG_PAX_KERNEXEC
12986 + call pax_exit_kernel
12987 +#endif
12988 +.endm
12989 +
12990 +#ifdef CONFIG_PAX_KERNEXEC
12991 +ENTRY(pax_enter_kernel)
12992 +#ifdef CONFIG_PARAVIRT
12993 + pushl %eax
12994 + pushl %ecx
12995 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12996 + mov %eax, %esi
12997 +#else
12998 + mov %cr0, %esi
12999 +#endif
13000 + bts $16, %esi
13001 + jnc 1f
13002 + mov %cs, %esi
13003 + cmp $__KERNEL_CS, %esi
13004 + jz 3f
13005 + ljmp $__KERNEL_CS, $3f
13006 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13007 +2:
13008 +#ifdef CONFIG_PARAVIRT
13009 + mov %esi, %eax
13010 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13011 +#else
13012 + mov %esi, %cr0
13013 +#endif
13014 +3:
13015 +#ifdef CONFIG_PARAVIRT
13016 + popl %ecx
13017 + popl %eax
13018 +#endif
13019 + ret
13020 +ENDPROC(pax_enter_kernel)
13021 +
13022 +ENTRY(pax_exit_kernel)
13023 +#ifdef CONFIG_PARAVIRT
13024 + pushl %eax
13025 + pushl %ecx
13026 +#endif
13027 + mov %cs, %esi
13028 + cmp $__KERNEXEC_KERNEL_CS, %esi
13029 + jnz 2f
13030 +#ifdef CONFIG_PARAVIRT
13031 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13032 + mov %eax, %esi
13033 +#else
13034 + mov %cr0, %esi
13035 +#endif
13036 + btr $16, %esi
13037 + ljmp $__KERNEL_CS, $1f
13038 +1:
13039 +#ifdef CONFIG_PARAVIRT
13040 + mov %esi, %eax
13041 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13042 +#else
13043 + mov %esi, %cr0
13044 +#endif
13045 +2:
13046 +#ifdef CONFIG_PARAVIRT
13047 + popl %ecx
13048 + popl %eax
13049 +#endif
13050 + ret
13051 +ENDPROC(pax_exit_kernel)
13052 +#endif
13053 +
13054 +.macro pax_erase_kstack
13055 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13056 + call pax_erase_kstack
13057 +#endif
13058 +.endm
13059 +
13060 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13061 +/*
13062 + * ebp: thread_info
13063 + * ecx, edx: can be clobbered
13064 + */
13065 +ENTRY(pax_erase_kstack)
13066 + pushl %edi
13067 + pushl %eax
13068 +
13069 + mov TI_lowest_stack(%ebp), %edi
13070 + mov $-0xBEEF, %eax
13071 + std
13072 +
13073 +1: mov %edi, %ecx
13074 + and $THREAD_SIZE_asm - 1, %ecx
13075 + shr $2, %ecx
13076 + repne scasl
13077 + jecxz 2f
13078 +
13079 + cmp $2*16, %ecx
13080 + jc 2f
13081 +
13082 + mov $2*16, %ecx
13083 + repe scasl
13084 + jecxz 2f
13085 + jne 1b
13086 +
13087 +2: cld
13088 + mov %esp, %ecx
13089 + sub %edi, %ecx
13090 + shr $2, %ecx
13091 + rep stosl
13092 +
13093 + mov TI_task_thread_sp0(%ebp), %edi
13094 + sub $128, %edi
13095 + mov %edi, TI_lowest_stack(%ebp)
13096 +
13097 + popl %eax
13098 + popl %edi
13099 + ret
13100 +ENDPROC(pax_erase_kstack)
13101 +#endif
13102 +
13103 +.macro __SAVE_ALL _DS
13104 cld
13105 PUSH_GS
13106 pushl_cfi %fs
13107 @@ -215,7 +348,7 @@
13108 CFI_REL_OFFSET ecx, 0
13109 pushl_cfi %ebx
13110 CFI_REL_OFFSET ebx, 0
13111 - movl $(__USER_DS), %edx
13112 + movl $\_DS, %edx
13113 movl %edx, %ds
13114 movl %edx, %es
13115 movl $(__KERNEL_PERCPU), %edx
13116 @@ -223,6 +356,15 @@
13117 SET_KERNEL_GS %edx
13118 .endm
13119
13120 +.macro SAVE_ALL
13121 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13122 + __SAVE_ALL __KERNEL_DS
13123 + pax_enter_kernel
13124 +#else
13125 + __SAVE_ALL __USER_DS
13126 +#endif
13127 +.endm
13128 +
13129 .macro RESTORE_INT_REGS
13130 popl_cfi %ebx
13131 CFI_RESTORE ebx
13132 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
13133 popfl_cfi
13134 jmp syscall_exit
13135 CFI_ENDPROC
13136 -END(ret_from_fork)
13137 +ENDPROC(ret_from_fork)
13138
13139 /*
13140 * Interrupt exit functions should be protected against kprobes
13141 @@ -333,7 +475,15 @@ check_userspace:
13142 movb PT_CS(%esp), %al
13143 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13144 cmpl $USER_RPL, %eax
13145 +
13146 +#ifdef CONFIG_PAX_KERNEXEC
13147 + jae resume_userspace
13148 +
13149 + PAX_EXIT_KERNEL
13150 + jmp resume_kernel
13151 +#else
13152 jb resume_kernel # not returning to v8086 or userspace
13153 +#endif
13154
13155 ENTRY(resume_userspace)
13156 LOCKDEP_SYS_EXIT
13157 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
13158 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13159 # int/exception return?
13160 jne work_pending
13161 - jmp restore_all
13162 -END(ret_from_exception)
13163 + jmp restore_all_pax
13164 +ENDPROC(ret_from_exception)
13165
13166 #ifdef CONFIG_PREEMPT
13167 ENTRY(resume_kernel)
13168 @@ -361,7 +511,7 @@ need_resched:
13169 jz restore_all
13170 call preempt_schedule_irq
13171 jmp need_resched
13172 -END(resume_kernel)
13173 +ENDPROC(resume_kernel)
13174 #endif
13175 CFI_ENDPROC
13176 /*
13177 @@ -395,23 +545,34 @@ sysenter_past_esp:
13178 /*CFI_REL_OFFSET cs, 0*/
13179 /*
13180 * Push current_thread_info()->sysenter_return to the stack.
13181 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13182 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13183 */
13184 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
13185 + pushl_cfi $0
13186 CFI_REL_OFFSET eip, 0
13187
13188 pushl_cfi %eax
13189 SAVE_ALL
13190 + GET_THREAD_INFO(%ebp)
13191 + movl TI_sysenter_return(%ebp),%ebp
13192 + movl %ebp,PT_EIP(%esp)
13193 ENABLE_INTERRUPTS(CLBR_NONE)
13194
13195 /*
13196 * Load the potential sixth argument from user stack.
13197 * Careful about security.
13198 */
13199 + movl PT_OLDESP(%esp),%ebp
13200 +
13201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13202 + mov PT_OLDSS(%esp),%ds
13203 +1: movl %ds:(%ebp),%ebp
13204 + push %ss
13205 + pop %ds
13206 +#else
13207 cmpl $__PAGE_OFFSET-3,%ebp
13208 jae syscall_fault
13209 1: movl (%ebp),%ebp
13210 +#endif
13211 +
13212 movl %ebp,PT_EBP(%esp)
13213 .section __ex_table,"a"
13214 .align 4
13215 @@ -434,12 +595,24 @@ sysenter_do_call:
13216 testl $_TIF_ALLWORK_MASK, %ecx
13217 jne sysexit_audit
13218 sysenter_exit:
13219 +
13220 +#ifdef CONFIG_PAX_RANDKSTACK
13221 + pushl_cfi %eax
13222 + movl %esp, %eax
13223 + call pax_randomize_kstack
13224 + popl_cfi %eax
13225 +#endif
13226 +
13227 + pax_erase_kstack
13228 +
13229 /* if something modifies registers it must also disable sysexit */
13230 movl PT_EIP(%esp), %edx
13231 movl PT_OLDESP(%esp), %ecx
13232 xorl %ebp,%ebp
13233 TRACE_IRQS_ON
13234 1: mov PT_FS(%esp), %fs
13235 +2: mov PT_DS(%esp), %ds
13236 +3: mov PT_ES(%esp), %es
13237 PTGS_TO_GS
13238 ENABLE_INTERRUPTS_SYSEXIT
13239
13240 @@ -456,6 +629,9 @@ sysenter_audit:
13241 movl %eax,%edx /* 2nd arg: syscall number */
13242 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13243 call audit_syscall_entry
13244 +
13245 + pax_erase_kstack
13246 +
13247 pushl_cfi %ebx
13248 movl PT_EAX(%esp),%eax /* reload syscall number */
13249 jmp sysenter_do_call
13250 @@ -482,11 +658,17 @@ sysexit_audit:
13251
13252 CFI_ENDPROC
13253 .pushsection .fixup,"ax"
13254 -2: movl $0,PT_FS(%esp)
13255 +4: movl $0,PT_FS(%esp)
13256 + jmp 1b
13257 +5: movl $0,PT_DS(%esp)
13258 + jmp 1b
13259 +6: movl $0,PT_ES(%esp)
13260 jmp 1b
13261 .section __ex_table,"a"
13262 .align 4
13263 - .long 1b,2b
13264 + .long 1b,4b
13265 + .long 2b,5b
13266 + .long 3b,6b
13267 .popsection
13268 PTGS_TO_GS_EX
13269 ENDPROC(ia32_sysenter_target)
13270 @@ -519,6 +701,15 @@ syscall_exit:
13271 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13272 jne syscall_exit_work
13273
13274 +restore_all_pax:
13275 +
13276 +#ifdef CONFIG_PAX_RANDKSTACK
13277 + movl %esp, %eax
13278 + call pax_randomize_kstack
13279 +#endif
13280 +
13281 + pax_erase_kstack
13282 +
13283 restore_all:
13284 TRACE_IRQS_IRET
13285 restore_all_notrace:
13286 @@ -578,14 +769,34 @@ ldt_ss:
13287 * compensating for the offset by changing to the ESPFIX segment with
13288 * a base address that matches for the difference.
13289 */
13290 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13291 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13292 mov %esp, %edx /* load kernel esp */
13293 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13294 mov %dx, %ax /* eax: new kernel esp */
13295 sub %eax, %edx /* offset (low word is 0) */
13296 +#ifdef CONFIG_SMP
13297 + movl PER_CPU_VAR(cpu_number), %ebx
13298 + shll $PAGE_SHIFT_asm, %ebx
13299 + addl $cpu_gdt_table, %ebx
13300 +#else
13301 + movl $cpu_gdt_table, %ebx
13302 +#endif
13303 shr $16, %edx
13304 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13305 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13306 +
13307 +#ifdef CONFIG_PAX_KERNEXEC
13308 + mov %cr0, %esi
13309 + btr $16, %esi
13310 + mov %esi, %cr0
13311 +#endif
13312 +
13313 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13314 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13315 +
13316 +#ifdef CONFIG_PAX_KERNEXEC
13317 + bts $16, %esi
13318 + mov %esi, %cr0
13319 +#endif
13320 +
13321 pushl_cfi $__ESPFIX_SS
13322 pushl_cfi %eax /* new kernel esp */
13323 /* Disable interrupts, but do not irqtrace this section: we
13324 @@ -614,34 +825,28 @@ work_resched:
13325 movl TI_flags(%ebp), %ecx
13326 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13327 # than syscall tracing?
13328 - jz restore_all
13329 + jz restore_all_pax
13330 testb $_TIF_NEED_RESCHED, %cl
13331 jnz work_resched
13332
13333 work_notifysig: # deal with pending signals and
13334 # notify-resume requests
13335 + movl %esp, %eax
13336 #ifdef CONFIG_VM86
13337 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13338 - movl %esp, %eax
13339 - jne work_notifysig_v86 # returning to kernel-space or
13340 + jz 1f # returning to kernel-space or
13341 # vm86-space
13342 - xorl %edx, %edx
13343 - call do_notify_resume
13344 - jmp resume_userspace_sig
13345
13346 - ALIGN
13347 -work_notifysig_v86:
13348 pushl_cfi %ecx # save ti_flags for do_notify_resume
13349 call save_v86_state # %eax contains pt_regs pointer
13350 popl_cfi %ecx
13351 movl %eax, %esp
13352 -#else
13353 - movl %esp, %eax
13354 +1:
13355 #endif
13356 xorl %edx, %edx
13357 call do_notify_resume
13358 jmp resume_userspace_sig
13359 -END(work_pending)
13360 +ENDPROC(work_pending)
13361
13362 # perform syscall exit tracing
13363 ALIGN
13364 @@ -649,11 +854,14 @@ syscall_trace_entry:
13365 movl $-ENOSYS,PT_EAX(%esp)
13366 movl %esp, %eax
13367 call syscall_trace_enter
13368 +
13369 + pax_erase_kstack
13370 +
13371 /* What it returned is what we'll actually use. */
13372 cmpl $(nr_syscalls), %eax
13373 jnae syscall_call
13374 jmp syscall_exit
13375 -END(syscall_trace_entry)
13376 +ENDPROC(syscall_trace_entry)
13377
13378 # perform syscall exit tracing
13379 ALIGN
13380 @@ -666,20 +874,24 @@ syscall_exit_work:
13381 movl %esp, %eax
13382 call syscall_trace_leave
13383 jmp resume_userspace
13384 -END(syscall_exit_work)
13385 +ENDPROC(syscall_exit_work)
13386 CFI_ENDPROC
13387
13388 RING0_INT_FRAME # can't unwind into user space anyway
13389 syscall_fault:
13390 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13391 + push %ss
13392 + pop %ds
13393 +#endif
13394 GET_THREAD_INFO(%ebp)
13395 movl $-EFAULT,PT_EAX(%esp)
13396 jmp resume_userspace
13397 -END(syscall_fault)
13398 +ENDPROC(syscall_fault)
13399
13400 syscall_badsys:
13401 movl $-ENOSYS,PT_EAX(%esp)
13402 jmp resume_userspace
13403 -END(syscall_badsys)
13404 +ENDPROC(syscall_badsys)
13405 CFI_ENDPROC
13406 /*
13407 * End of kprobes section
13408 @@ -753,6 +965,36 @@ ptregs_clone:
13409 CFI_ENDPROC
13410 ENDPROC(ptregs_clone)
13411
13412 + ALIGN;
13413 +ENTRY(kernel_execve)
13414 + CFI_STARTPROC
13415 + pushl_cfi %ebp
13416 + sub $PT_OLDSS+4,%esp
13417 + pushl_cfi %edi
13418 + pushl_cfi %ecx
13419 + pushl_cfi %eax
13420 + lea 3*4(%esp),%edi
13421 + mov $PT_OLDSS/4+1,%ecx
13422 + xorl %eax,%eax
13423 + rep stosl
13424 + popl_cfi %eax
13425 + popl_cfi %ecx
13426 + popl_cfi %edi
13427 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13428 + pushl_cfi %esp
13429 + call sys_execve
13430 + add $4,%esp
13431 + CFI_ADJUST_CFA_OFFSET -4
13432 + GET_THREAD_INFO(%ebp)
13433 + test %eax,%eax
13434 + jz syscall_exit
13435 + add $PT_OLDSS+4,%esp
13436 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13437 + popl_cfi %ebp
13438 + ret
13439 + CFI_ENDPROC
13440 +ENDPROC(kernel_execve)
13441 +
13442 .macro FIXUP_ESPFIX_STACK
13443 /*
13444 * Switch back for ESPFIX stack to the normal zerobased stack
13445 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13446 * normal stack and adjusts ESP with the matching offset.
13447 */
13448 /* fixup the stack */
13449 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13450 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13451 +#ifdef CONFIG_SMP
13452 + movl PER_CPU_VAR(cpu_number), %ebx
13453 + shll $PAGE_SHIFT_asm, %ebx
13454 + addl $cpu_gdt_table, %ebx
13455 +#else
13456 + movl $cpu_gdt_table, %ebx
13457 +#endif
13458 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13459 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13460 shl $16, %eax
13461 addl %esp, %eax /* the adjusted stack pointer */
13462 pushl_cfi $__KERNEL_DS
13463 @@ -816,7 +1065,7 @@ vector=vector+1
13464 .endr
13465 2: jmp common_interrupt
13466 .endr
13467 -END(irq_entries_start)
13468 +ENDPROC(irq_entries_start)
13469
13470 .previous
13471 END(interrupt)
13472 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13473 pushl_cfi $do_coprocessor_error
13474 jmp error_code
13475 CFI_ENDPROC
13476 -END(coprocessor_error)
13477 +ENDPROC(coprocessor_error)
13478
13479 ENTRY(simd_coprocessor_error)
13480 RING0_INT_FRAME
13481 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13482 #endif
13483 jmp error_code
13484 CFI_ENDPROC
13485 -END(simd_coprocessor_error)
13486 +ENDPROC(simd_coprocessor_error)
13487
13488 ENTRY(device_not_available)
13489 RING0_INT_FRAME
13490 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13491 pushl_cfi $do_device_not_available
13492 jmp error_code
13493 CFI_ENDPROC
13494 -END(device_not_available)
13495 +ENDPROC(device_not_available)
13496
13497 #ifdef CONFIG_PARAVIRT
13498 ENTRY(native_iret)
13499 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13500 .align 4
13501 .long native_iret, iret_exc
13502 .previous
13503 -END(native_iret)
13504 +ENDPROC(native_iret)
13505
13506 ENTRY(native_irq_enable_sysexit)
13507 sti
13508 sysexit
13509 -END(native_irq_enable_sysexit)
13510 +ENDPROC(native_irq_enable_sysexit)
13511 #endif
13512
13513 ENTRY(overflow)
13514 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13515 pushl_cfi $do_overflow
13516 jmp error_code
13517 CFI_ENDPROC
13518 -END(overflow)
13519 +ENDPROC(overflow)
13520
13521 ENTRY(bounds)
13522 RING0_INT_FRAME
13523 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13524 pushl_cfi $do_bounds
13525 jmp error_code
13526 CFI_ENDPROC
13527 -END(bounds)
13528 +ENDPROC(bounds)
13529
13530 ENTRY(invalid_op)
13531 RING0_INT_FRAME
13532 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13533 pushl_cfi $do_invalid_op
13534 jmp error_code
13535 CFI_ENDPROC
13536 -END(invalid_op)
13537 +ENDPROC(invalid_op)
13538
13539 ENTRY(coprocessor_segment_overrun)
13540 RING0_INT_FRAME
13541 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13542 pushl_cfi $do_coprocessor_segment_overrun
13543 jmp error_code
13544 CFI_ENDPROC
13545 -END(coprocessor_segment_overrun)
13546 +ENDPROC(coprocessor_segment_overrun)
13547
13548 ENTRY(invalid_TSS)
13549 RING0_EC_FRAME
13550 pushl_cfi $do_invalid_TSS
13551 jmp error_code
13552 CFI_ENDPROC
13553 -END(invalid_TSS)
13554 +ENDPROC(invalid_TSS)
13555
13556 ENTRY(segment_not_present)
13557 RING0_EC_FRAME
13558 pushl_cfi $do_segment_not_present
13559 jmp error_code
13560 CFI_ENDPROC
13561 -END(segment_not_present)
13562 +ENDPROC(segment_not_present)
13563
13564 ENTRY(stack_segment)
13565 RING0_EC_FRAME
13566 pushl_cfi $do_stack_segment
13567 jmp error_code
13568 CFI_ENDPROC
13569 -END(stack_segment)
13570 +ENDPROC(stack_segment)
13571
13572 ENTRY(alignment_check)
13573 RING0_EC_FRAME
13574 pushl_cfi $do_alignment_check
13575 jmp error_code
13576 CFI_ENDPROC
13577 -END(alignment_check)
13578 +ENDPROC(alignment_check)
13579
13580 ENTRY(divide_error)
13581 RING0_INT_FRAME
13582 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13583 pushl_cfi $do_divide_error
13584 jmp error_code
13585 CFI_ENDPROC
13586 -END(divide_error)
13587 +ENDPROC(divide_error)
13588
13589 #ifdef CONFIG_X86_MCE
13590 ENTRY(machine_check)
13591 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13592 pushl_cfi machine_check_vector
13593 jmp error_code
13594 CFI_ENDPROC
13595 -END(machine_check)
13596 +ENDPROC(machine_check)
13597 #endif
13598
13599 ENTRY(spurious_interrupt_bug)
13600 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13601 pushl_cfi $do_spurious_interrupt_bug
13602 jmp error_code
13603 CFI_ENDPROC
13604 -END(spurious_interrupt_bug)
13605 +ENDPROC(spurious_interrupt_bug)
13606 /*
13607 * End of kprobes section
13608 */
13609 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13610
13611 ENTRY(mcount)
13612 ret
13613 -END(mcount)
13614 +ENDPROC(mcount)
13615
13616 ENTRY(ftrace_caller)
13617 cmpl $0, function_trace_stop
13618 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13619 .globl ftrace_stub
13620 ftrace_stub:
13621 ret
13622 -END(ftrace_caller)
13623 +ENDPROC(ftrace_caller)
13624
13625 #else /* ! CONFIG_DYNAMIC_FTRACE */
13626
13627 @@ -1174,7 +1423,7 @@ trace:
13628 popl %ecx
13629 popl %eax
13630 jmp ftrace_stub
13631 -END(mcount)
13632 +ENDPROC(mcount)
13633 #endif /* CONFIG_DYNAMIC_FTRACE */
13634 #endif /* CONFIG_FUNCTION_TRACER */
13635
13636 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13637 popl %ecx
13638 popl %eax
13639 ret
13640 -END(ftrace_graph_caller)
13641 +ENDPROC(ftrace_graph_caller)
13642
13643 .globl return_to_handler
13644 return_to_handler:
13645 @@ -1209,7 +1458,6 @@ return_to_handler:
13646 jmp *%ecx
13647 #endif
13648
13649 -.section .rodata,"a"
13650 #include "syscall_table_32.S"
13651
13652 syscall_table_size=(.-sys_call_table)
13653 @@ -1255,15 +1503,18 @@ error_code:
13654 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13655 REG_TO_PTGS %ecx
13656 SET_KERNEL_GS %ecx
13657 - movl $(__USER_DS), %ecx
13658 + movl $(__KERNEL_DS), %ecx
13659 movl %ecx, %ds
13660 movl %ecx, %es
13661 +
13662 + pax_enter_kernel
13663 +
13664 TRACE_IRQS_OFF
13665 movl %esp,%eax # pt_regs pointer
13666 call *%edi
13667 jmp ret_from_exception
13668 CFI_ENDPROC
13669 -END(page_fault)
13670 +ENDPROC(page_fault)
13671
13672 /*
13673 * Debug traps and NMI can happen at the one SYSENTER instruction
13674 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13675 call do_debug
13676 jmp ret_from_exception
13677 CFI_ENDPROC
13678 -END(debug)
13679 +ENDPROC(debug)
13680
13681 /*
13682 * NMI is doubly nasty. It can happen _while_ we're handling
13683 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13684 xorl %edx,%edx # zero error code
13685 movl %esp,%eax # pt_regs pointer
13686 call do_nmi
13687 +
13688 + pax_exit_kernel
13689 +
13690 jmp restore_all_notrace
13691 CFI_ENDPROC
13692
13693 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13694 FIXUP_ESPFIX_STACK # %eax == %esp
13695 xorl %edx,%edx # zero error code
13696 call do_nmi
13697 +
13698 + pax_exit_kernel
13699 +
13700 RESTORE_REGS
13701 lss 12+4(%esp), %esp # back to espfix stack
13702 CFI_ADJUST_CFA_OFFSET -24
13703 jmp irq_return
13704 CFI_ENDPROC
13705 -END(nmi)
13706 +ENDPROC(nmi)
13707
13708 ENTRY(int3)
13709 RING0_INT_FRAME
13710 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13711 call do_int3
13712 jmp ret_from_exception
13713 CFI_ENDPROC
13714 -END(int3)
13715 +ENDPROC(int3)
13716
13717 ENTRY(general_protection)
13718 RING0_EC_FRAME
13719 pushl_cfi $do_general_protection
13720 jmp error_code
13721 CFI_ENDPROC
13722 -END(general_protection)
13723 +ENDPROC(general_protection)
13724
13725 #ifdef CONFIG_KVM_GUEST
13726 ENTRY(async_page_fault)
13727 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13728 pushl_cfi $do_async_page_fault
13729 jmp error_code
13730 CFI_ENDPROC
13731 -END(async_page_fault)
13732 +ENDPROC(async_page_fault)
13733 #endif
13734
13735 /*
13736 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13737 index faf8d5e..4f16a68 100644
13738 --- a/arch/x86/kernel/entry_64.S
13739 +++ b/arch/x86/kernel/entry_64.S
13740 @@ -55,6 +55,8 @@
13741 #include <asm/paravirt.h>
13742 #include <asm/ftrace.h>
13743 #include <asm/percpu.h>
13744 +#include <asm/pgtable.h>
13745 +#include <asm/alternative-asm.h>
13746
13747 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13748 #include <linux/elf-em.h>
13749 @@ -68,8 +70,9 @@
13750 #ifdef CONFIG_FUNCTION_TRACER
13751 #ifdef CONFIG_DYNAMIC_FTRACE
13752 ENTRY(mcount)
13753 + pax_force_retaddr
13754 retq
13755 -END(mcount)
13756 +ENDPROC(mcount)
13757
13758 ENTRY(ftrace_caller)
13759 cmpl $0, function_trace_stop
13760 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13761 #endif
13762
13763 GLOBAL(ftrace_stub)
13764 + pax_force_retaddr
13765 retq
13766 -END(ftrace_caller)
13767 +ENDPROC(ftrace_caller)
13768
13769 #else /* ! CONFIG_DYNAMIC_FTRACE */
13770 ENTRY(mcount)
13771 @@ -112,6 +116,7 @@ ENTRY(mcount)
13772 #endif
13773
13774 GLOBAL(ftrace_stub)
13775 + pax_force_retaddr
13776 retq
13777
13778 trace:
13779 @@ -121,12 +126,13 @@ trace:
13780 movq 8(%rbp), %rsi
13781 subq $MCOUNT_INSN_SIZE, %rdi
13782
13783 + pax_force_fptr ftrace_trace_function
13784 call *ftrace_trace_function
13785
13786 MCOUNT_RESTORE_FRAME
13787
13788 jmp ftrace_stub
13789 -END(mcount)
13790 +ENDPROC(mcount)
13791 #endif /* CONFIG_DYNAMIC_FTRACE */
13792 #endif /* CONFIG_FUNCTION_TRACER */
13793
13794 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13795
13796 MCOUNT_RESTORE_FRAME
13797
13798 + pax_force_retaddr
13799 retq
13800 -END(ftrace_graph_caller)
13801 +ENDPROC(ftrace_graph_caller)
13802
13803 GLOBAL(return_to_handler)
13804 subq $24, %rsp
13805 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13806 movq 8(%rsp), %rdx
13807 movq (%rsp), %rax
13808 addq $24, %rsp
13809 + pax_force_fptr %rdi
13810 jmp *%rdi
13811 #endif
13812
13813 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13814 ENDPROC(native_usergs_sysret64)
13815 #endif /* CONFIG_PARAVIRT */
13816
13817 + .macro ljmpq sel, off
13818 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13819 + .byte 0x48; ljmp *1234f(%rip)
13820 + .pushsection .rodata
13821 + .align 16
13822 + 1234: .quad \off; .word \sel
13823 + .popsection
13824 +#else
13825 + pushq $\sel
13826 + pushq $\off
13827 + lretq
13828 +#endif
13829 + .endm
13830 +
13831 + .macro pax_enter_kernel
13832 + pax_set_fptr_mask
13833 +#ifdef CONFIG_PAX_KERNEXEC
13834 + call pax_enter_kernel
13835 +#endif
13836 + .endm
13837 +
13838 + .macro pax_exit_kernel
13839 +#ifdef CONFIG_PAX_KERNEXEC
13840 + call pax_exit_kernel
13841 +#endif
13842 + .endm
13843 +
13844 +#ifdef CONFIG_PAX_KERNEXEC
13845 +ENTRY(pax_enter_kernel)
13846 + pushq %rdi
13847 +
13848 +#ifdef CONFIG_PARAVIRT
13849 + PV_SAVE_REGS(CLBR_RDI)
13850 +#endif
13851 +
13852 + GET_CR0_INTO_RDI
13853 + bts $16,%rdi
13854 + jnc 3f
13855 + mov %cs,%edi
13856 + cmp $__KERNEL_CS,%edi
13857 + jnz 2f
13858 +1:
13859 +
13860 +#ifdef CONFIG_PARAVIRT
13861 + PV_RESTORE_REGS(CLBR_RDI)
13862 +#endif
13863 +
13864 + popq %rdi
13865 + pax_force_retaddr
13866 + retq
13867 +
13868 +2: ljmpq __KERNEL_CS,1f
13869 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13870 +4: SET_RDI_INTO_CR0
13871 + jmp 1b
13872 +ENDPROC(pax_enter_kernel)
13873 +
13874 +ENTRY(pax_exit_kernel)
13875 + pushq %rdi
13876 +
13877 +#ifdef CONFIG_PARAVIRT
13878 + PV_SAVE_REGS(CLBR_RDI)
13879 +#endif
13880 +
13881 + mov %cs,%rdi
13882 + cmp $__KERNEXEC_KERNEL_CS,%edi
13883 + jz 2f
13884 +1:
13885 +
13886 +#ifdef CONFIG_PARAVIRT
13887 + PV_RESTORE_REGS(CLBR_RDI);
13888 +#endif
13889 +
13890 + popq %rdi
13891 + pax_force_retaddr
13892 + retq
13893 +
13894 +2: GET_CR0_INTO_RDI
13895 + btr $16,%rdi
13896 + ljmpq __KERNEL_CS,3f
13897 +3: SET_RDI_INTO_CR0
13898 + jmp 1b
13899 +#ifdef CONFIG_PARAVIRT
13900 + PV_RESTORE_REGS(CLBR_RDI);
13901 +#endif
13902 +
13903 + popq %rdi
13904 + pax_force_retaddr
13905 + retq
13906 +ENDPROC(pax_exit_kernel)
13907 +#endif
13908 +
13909 + .macro pax_enter_kernel_user
13910 + pax_set_fptr_mask
13911 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13912 + call pax_enter_kernel_user
13913 +#endif
13914 + .endm
13915 +
13916 + .macro pax_exit_kernel_user
13917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13918 + call pax_exit_kernel_user
13919 +#endif
13920 +#ifdef CONFIG_PAX_RANDKSTACK
13921 + pushq %rax
13922 + call pax_randomize_kstack
13923 + popq %rax
13924 +#endif
13925 + .endm
13926 +
13927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13928 +ENTRY(pax_enter_kernel_user)
13929 + pushq %rdi
13930 + pushq %rbx
13931 +
13932 +#ifdef CONFIG_PARAVIRT
13933 + PV_SAVE_REGS(CLBR_RDI)
13934 +#endif
13935 +
13936 + GET_CR3_INTO_RDI
13937 + mov %rdi,%rbx
13938 + add $__START_KERNEL_map,%rbx
13939 + sub phys_base(%rip),%rbx
13940 +
13941 +#ifdef CONFIG_PARAVIRT
13942 + pushq %rdi
13943 + cmpl $0, pv_info+PARAVIRT_enabled
13944 + jz 1f
13945 + i = 0
13946 + .rept USER_PGD_PTRS
13947 + mov i*8(%rbx),%rsi
13948 + mov $0,%sil
13949 + lea i*8(%rbx),%rdi
13950 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13951 + i = i + 1
13952 + .endr
13953 + jmp 2f
13954 +1:
13955 +#endif
13956 +
13957 + i = 0
13958 + .rept USER_PGD_PTRS
13959 + movb $0,i*8(%rbx)
13960 + i = i + 1
13961 + .endr
13962 +
13963 +#ifdef CONFIG_PARAVIRT
13964 +2: popq %rdi
13965 +#endif
13966 + SET_RDI_INTO_CR3
13967 +
13968 +#ifdef CONFIG_PAX_KERNEXEC
13969 + GET_CR0_INTO_RDI
13970 + bts $16,%rdi
13971 + SET_RDI_INTO_CR0
13972 +#endif
13973 +
13974 +#ifdef CONFIG_PARAVIRT
13975 + PV_RESTORE_REGS(CLBR_RDI)
13976 +#endif
13977 +
13978 + popq %rbx
13979 + popq %rdi
13980 + pax_force_retaddr
13981 + retq
13982 +ENDPROC(pax_enter_kernel_user)
13983 +
13984 +ENTRY(pax_exit_kernel_user)
13985 + push %rdi
13986 +
13987 +#ifdef CONFIG_PARAVIRT
13988 + pushq %rbx
13989 + PV_SAVE_REGS(CLBR_RDI)
13990 +#endif
13991 +
13992 +#ifdef CONFIG_PAX_KERNEXEC
13993 + GET_CR0_INTO_RDI
13994 + btr $16,%rdi
13995 + SET_RDI_INTO_CR0
13996 +#endif
13997 +
13998 + GET_CR3_INTO_RDI
13999 + add $__START_KERNEL_map,%rdi
14000 + sub phys_base(%rip),%rdi
14001 +
14002 +#ifdef CONFIG_PARAVIRT
14003 + cmpl $0, pv_info+PARAVIRT_enabled
14004 + jz 1f
14005 + mov %rdi,%rbx
14006 + i = 0
14007 + .rept USER_PGD_PTRS
14008 + mov i*8(%rbx),%rsi
14009 + mov $0x67,%sil
14010 + lea i*8(%rbx),%rdi
14011 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14012 + i = i + 1
14013 + .endr
14014 + jmp 2f
14015 +1:
14016 +#endif
14017 +
14018 + i = 0
14019 + .rept USER_PGD_PTRS
14020 + movb $0x67,i*8(%rdi)
14021 + i = i + 1
14022 + .endr
14023 +
14024 +#ifdef CONFIG_PARAVIRT
14025 +2: PV_RESTORE_REGS(CLBR_RDI)
14026 + popq %rbx
14027 +#endif
14028 +
14029 + popq %rdi
14030 + pax_force_retaddr
14031 + retq
14032 +ENDPROC(pax_exit_kernel_user)
14033 +#endif
14034 +
14035 +.macro pax_erase_kstack
14036 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14037 + call pax_erase_kstack
14038 +#endif
14039 +.endm
14040 +
14041 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14042 +/*
14043 + * r11: thread_info
14044 + * rcx, rdx: can be clobbered
14045 + */
14046 +ENTRY(pax_erase_kstack)
14047 + pushq %rdi
14048 + pushq %rax
14049 + pushq %r11
14050 +
14051 + GET_THREAD_INFO(%r11)
14052 + mov TI_lowest_stack(%r11), %rdi
14053 + mov $-0xBEEF, %rax
14054 + std
14055 +
14056 +1: mov %edi, %ecx
14057 + and $THREAD_SIZE_asm - 1, %ecx
14058 + shr $3, %ecx
14059 + repne scasq
14060 + jecxz 2f
14061 +
14062 + cmp $2*8, %ecx
14063 + jc 2f
14064 +
14065 + mov $2*8, %ecx
14066 + repe scasq
14067 + jecxz 2f
14068 + jne 1b
14069 +
14070 +2: cld
14071 + mov %esp, %ecx
14072 + sub %edi, %ecx
14073 +
14074 + cmp $THREAD_SIZE_asm, %rcx
14075 + jb 3f
14076 + ud2
14077 +3:
14078 +
14079 + shr $3, %ecx
14080 + rep stosq
14081 +
14082 + mov TI_task_thread_sp0(%r11), %rdi
14083 + sub $256, %rdi
14084 + mov %rdi, TI_lowest_stack(%r11)
14085 +
14086 + popq %r11
14087 + popq %rax
14088 + popq %rdi
14089 + pax_force_retaddr
14090 + ret
14091 +ENDPROC(pax_erase_kstack)
14092 +#endif
14093
14094 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14095 #ifdef CONFIG_TRACE_IRQFLAGS
14096 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
14097 .endm
14098
14099 .macro UNFAKE_STACK_FRAME
14100 - addq $8*6, %rsp
14101 - CFI_ADJUST_CFA_OFFSET -(6*8)
14102 + addq $8*6 + ARG_SKIP, %rsp
14103 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
14104 .endm
14105
14106 /*
14107 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
14108 movq %rsp, %rsi
14109
14110 leaq -RBP(%rsp),%rdi /* arg1 for handler */
14111 - testl $3, CS(%rdi)
14112 + testb $3, CS(%rdi)
14113 je 1f
14114 SWAPGS
14115 /*
14116 @@ -355,9 +639,10 @@ ENTRY(save_rest)
14117 movq_cfi r15, R15+16
14118 movq %r11, 8(%rsp) /* return address */
14119 FIXUP_TOP_OF_STACK %r11, 16
14120 + pax_force_retaddr
14121 ret
14122 CFI_ENDPROC
14123 -END(save_rest)
14124 +ENDPROC(save_rest)
14125
14126 /* save complete stack frame */
14127 .pushsection .kprobes.text, "ax"
14128 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
14129 js 1f /* negative -> in kernel */
14130 SWAPGS
14131 xorl %ebx,%ebx
14132 -1: ret
14133 +1: pax_force_retaddr_bts
14134 + ret
14135 CFI_ENDPROC
14136 -END(save_paranoid)
14137 +ENDPROC(save_paranoid)
14138 .popsection
14139
14140 /*
14141 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
14142
14143 RESTORE_REST
14144
14145 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14146 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14147 je int_ret_from_sys_call
14148
14149 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14150 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
14151 jmp ret_from_sys_call # go to the SYSRET fastpath
14152
14153 CFI_ENDPROC
14154 -END(ret_from_fork)
14155 +ENDPROC(ret_from_fork)
14156
14157 /*
14158 * System call entry. Up to 6 arguments in registers are supported.
14159 @@ -456,7 +742,7 @@ END(ret_from_fork)
14160 ENTRY(system_call)
14161 CFI_STARTPROC simple
14162 CFI_SIGNAL_FRAME
14163 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14164 + CFI_DEF_CFA rsp,0
14165 CFI_REGISTER rip,rcx
14166 /*CFI_REGISTER rflags,r11*/
14167 SWAPGS_UNSAFE_STACK
14168 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
14169
14170 movq %rsp,PER_CPU_VAR(old_rsp)
14171 movq PER_CPU_VAR(kernel_stack),%rsp
14172 + SAVE_ARGS 8*6,0
14173 + pax_enter_kernel_user
14174 /*
14175 * No need to follow this irqs off/on section - it's straight
14176 * and short:
14177 */
14178 ENABLE_INTERRUPTS(CLBR_NONE)
14179 - SAVE_ARGS 8,0
14180 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14181 movq %rcx,RIP-ARGOFFSET(%rsp)
14182 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14183 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
14184 system_call_fastpath:
14185 cmpq $__NR_syscall_max,%rax
14186 ja badsys
14187 - movq %r10,%rcx
14188 + movq R10-ARGOFFSET(%rsp),%rcx
14189 call *sys_call_table(,%rax,8) # XXX: rip relative
14190 movq %rax,RAX-ARGOFFSET(%rsp)
14191 /*
14192 @@ -503,6 +790,8 @@ sysret_check:
14193 andl %edi,%edx
14194 jnz sysret_careful
14195 CFI_REMEMBER_STATE
14196 + pax_exit_kernel_user
14197 + pax_erase_kstack
14198 /*
14199 * sysretq will re-enable interrupts:
14200 */
14201 @@ -554,14 +843,18 @@ badsys:
14202 * jump back to the normal fast path.
14203 */
14204 auditsys:
14205 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
14206 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
14207 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
14208 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
14209 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
14210 movq %rax,%rsi /* 2nd arg: syscall number */
14211 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14212 call audit_syscall_entry
14213 +
14214 + pax_erase_kstack
14215 +
14216 LOAD_ARGS 0 /* reload call-clobbered registers */
14217 + pax_set_fptr_mask
14218 jmp system_call_fastpath
14219
14220 /*
14221 @@ -591,16 +884,20 @@ tracesys:
14222 FIXUP_TOP_OF_STACK %rdi
14223 movq %rsp,%rdi
14224 call syscall_trace_enter
14225 +
14226 + pax_erase_kstack
14227 +
14228 /*
14229 * Reload arg registers from stack in case ptrace changed them.
14230 * We don't reload %rax because syscall_trace_enter() returned
14231 * the value it wants us to use in the table lookup.
14232 */
14233 LOAD_ARGS ARGOFFSET, 1
14234 + pax_set_fptr_mask
14235 RESTORE_REST
14236 cmpq $__NR_syscall_max,%rax
14237 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
14238 - movq %r10,%rcx /* fixup for C */
14239 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
14240 call *sys_call_table(,%rax,8)
14241 movq %rax,RAX-ARGOFFSET(%rsp)
14242 /* Use IRET because user could have changed frame */
14243 @@ -612,7 +909,7 @@ tracesys:
14244 GLOBAL(int_ret_from_sys_call)
14245 DISABLE_INTERRUPTS(CLBR_NONE)
14246 TRACE_IRQS_OFF
14247 - testl $3,CS-ARGOFFSET(%rsp)
14248 + testb $3,CS-ARGOFFSET(%rsp)
14249 je retint_restore_args
14250 movl $_TIF_ALLWORK_MASK,%edi
14251 /* edi: mask to check */
14252 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14253 andl %edi,%edx
14254 jnz int_careful
14255 andl $~TS_COMPAT,TI_status(%rcx)
14256 + pax_erase_kstack
14257 jmp retint_swapgs
14258
14259 /* Either reschedule or signal or syscall exit tracking needed. */
14260 @@ -669,7 +967,7 @@ int_restore_rest:
14261 TRACE_IRQS_OFF
14262 jmp int_with_check
14263 CFI_ENDPROC
14264 -END(system_call)
14265 +ENDPROC(system_call)
14266
14267 /*
14268 * Certain special system calls that need to save a complete full stack frame.
14269 @@ -685,7 +983,7 @@ ENTRY(\label)
14270 call \func
14271 jmp ptregscall_common
14272 CFI_ENDPROC
14273 -END(\label)
14274 +ENDPROC(\label)
14275 .endm
14276
14277 PTREGSCALL stub_clone, sys_clone, %r8
14278 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14279 movq_cfi_restore R12+8, r12
14280 movq_cfi_restore RBP+8, rbp
14281 movq_cfi_restore RBX+8, rbx
14282 + pax_force_retaddr
14283 ret $REST_SKIP /* pop extended registers */
14284 CFI_ENDPROC
14285 -END(ptregscall_common)
14286 +ENDPROC(ptregscall_common)
14287
14288 ENTRY(stub_execve)
14289 CFI_STARTPROC
14290 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14291 RESTORE_REST
14292 jmp int_ret_from_sys_call
14293 CFI_ENDPROC
14294 -END(stub_execve)
14295 +ENDPROC(stub_execve)
14296
14297 /*
14298 * sigreturn is special because it needs to restore all registers on return.
14299 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14300 RESTORE_REST
14301 jmp int_ret_from_sys_call
14302 CFI_ENDPROC
14303 -END(stub_rt_sigreturn)
14304 +ENDPROC(stub_rt_sigreturn)
14305
14306 /*
14307 * Build the entry stubs and pointer table with some assembler magic.
14308 @@ -773,7 +1072,7 @@ vector=vector+1
14309 2: jmp common_interrupt
14310 .endr
14311 CFI_ENDPROC
14312 -END(irq_entries_start)
14313 +ENDPROC(irq_entries_start)
14314
14315 .previous
14316 END(interrupt)
14317 @@ -793,6 +1092,16 @@ END(interrupt)
14318 subq $ORIG_RAX-RBP, %rsp
14319 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14320 SAVE_ARGS_IRQ
14321 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14322 + testb $3, CS(%rdi)
14323 + jnz 1f
14324 + pax_enter_kernel
14325 + jmp 2f
14326 +1: pax_enter_kernel_user
14327 +2:
14328 +#else
14329 + pax_enter_kernel
14330 +#endif
14331 call \func
14332 .endm
14333
14334 @@ -824,7 +1133,7 @@ ret_from_intr:
14335
14336 exit_intr:
14337 GET_THREAD_INFO(%rcx)
14338 - testl $3,CS-ARGOFFSET(%rsp)
14339 + testb $3,CS-ARGOFFSET(%rsp)
14340 je retint_kernel
14341
14342 /* Interrupt came from user space */
14343 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14344 * The iretq could re-enable interrupts:
14345 */
14346 DISABLE_INTERRUPTS(CLBR_ANY)
14347 + pax_exit_kernel_user
14348 TRACE_IRQS_IRETQ
14349 SWAPGS
14350 jmp restore_args
14351
14352 retint_restore_args: /* return to kernel space */
14353 DISABLE_INTERRUPTS(CLBR_ANY)
14354 + pax_exit_kernel
14355 + pax_force_retaddr RIP-ARGOFFSET
14356 /*
14357 * The iretq could re-enable interrupts:
14358 */
14359 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14360 #endif
14361
14362 CFI_ENDPROC
14363 -END(common_interrupt)
14364 +ENDPROC(common_interrupt)
14365 /*
14366 * End of kprobes section
14367 */
14368 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14369 interrupt \do_sym
14370 jmp ret_from_intr
14371 CFI_ENDPROC
14372 -END(\sym)
14373 +ENDPROC(\sym)
14374 .endm
14375
14376 #ifdef CONFIG_SMP
14377 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14378 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14379 call error_entry
14380 DEFAULT_FRAME 0
14381 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14382 + testb $3, CS(%rsp)
14383 + jnz 1f
14384 + pax_enter_kernel
14385 + jmp 2f
14386 +1: pax_enter_kernel_user
14387 +2:
14388 +#else
14389 + pax_enter_kernel
14390 +#endif
14391 movq %rsp,%rdi /* pt_regs pointer */
14392 xorl %esi,%esi /* no error code */
14393 call \do_sym
14394 jmp error_exit /* %ebx: no swapgs flag */
14395 CFI_ENDPROC
14396 -END(\sym)
14397 +ENDPROC(\sym)
14398 .endm
14399
14400 .macro paranoidzeroentry sym do_sym
14401 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14402 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14403 call save_paranoid
14404 TRACE_IRQS_OFF
14405 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14406 + testb $3, CS(%rsp)
14407 + jnz 1f
14408 + pax_enter_kernel
14409 + jmp 2f
14410 +1: pax_enter_kernel_user
14411 +2:
14412 +#else
14413 + pax_enter_kernel
14414 +#endif
14415 movq %rsp,%rdi /* pt_regs pointer */
14416 xorl %esi,%esi /* no error code */
14417 call \do_sym
14418 jmp paranoid_exit /* %ebx: no swapgs flag */
14419 CFI_ENDPROC
14420 -END(\sym)
14421 +ENDPROC(\sym)
14422 .endm
14423
14424 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14425 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14426 .macro paranoidzeroentry_ist sym do_sym ist
14427 ENTRY(\sym)
14428 INTR_FRAME
14429 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14430 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14431 call save_paranoid
14432 TRACE_IRQS_OFF
14433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14434 + testb $3, CS(%rsp)
14435 + jnz 1f
14436 + pax_enter_kernel
14437 + jmp 2f
14438 +1: pax_enter_kernel_user
14439 +2:
14440 +#else
14441 + pax_enter_kernel
14442 +#endif
14443 movq %rsp,%rdi /* pt_regs pointer */
14444 xorl %esi,%esi /* no error code */
14445 +#ifdef CONFIG_SMP
14446 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14447 + lea init_tss(%r12), %r12
14448 +#else
14449 + lea init_tss(%rip), %r12
14450 +#endif
14451 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14452 call \do_sym
14453 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14454 jmp paranoid_exit /* %ebx: no swapgs flag */
14455 CFI_ENDPROC
14456 -END(\sym)
14457 +ENDPROC(\sym)
14458 .endm
14459
14460 .macro errorentry sym do_sym
14461 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14462 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14463 call error_entry
14464 DEFAULT_FRAME 0
14465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14466 + testb $3, CS(%rsp)
14467 + jnz 1f
14468 + pax_enter_kernel
14469 + jmp 2f
14470 +1: pax_enter_kernel_user
14471 +2:
14472 +#else
14473 + pax_enter_kernel
14474 +#endif
14475 movq %rsp,%rdi /* pt_regs pointer */
14476 movq ORIG_RAX(%rsp),%rsi /* get error code */
14477 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14478 call \do_sym
14479 jmp error_exit /* %ebx: no swapgs flag */
14480 CFI_ENDPROC
14481 -END(\sym)
14482 +ENDPROC(\sym)
14483 .endm
14484
14485 /* error code is on the stack already */
14486 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14487 call save_paranoid
14488 DEFAULT_FRAME 0
14489 TRACE_IRQS_OFF
14490 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14491 + testb $3, CS(%rsp)
14492 + jnz 1f
14493 + pax_enter_kernel
14494 + jmp 2f
14495 +1: pax_enter_kernel_user
14496 +2:
14497 +#else
14498 + pax_enter_kernel
14499 +#endif
14500 movq %rsp,%rdi /* pt_regs pointer */
14501 movq ORIG_RAX(%rsp),%rsi /* get error code */
14502 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14503 call \do_sym
14504 jmp paranoid_exit /* %ebx: no swapgs flag */
14505 CFI_ENDPROC
14506 -END(\sym)
14507 +ENDPROC(\sym)
14508 .endm
14509
14510 zeroentry divide_error do_divide_error
14511 @@ -1129,9 +1497,10 @@ gs_change:
14512 2: mfence /* workaround */
14513 SWAPGS
14514 popfq_cfi
14515 + pax_force_retaddr
14516 ret
14517 CFI_ENDPROC
14518 -END(native_load_gs_index)
14519 +ENDPROC(native_load_gs_index)
14520
14521 .section __ex_table,"a"
14522 .align 8
14523 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14524 * Here we are in the child and the registers are set as they were
14525 * at kernel_thread() invocation in the parent.
14526 */
14527 + pax_force_fptr %rsi
14528 call *%rsi
14529 # exit
14530 mov %eax, %edi
14531 call do_exit
14532 ud2 # padding for call trace
14533 CFI_ENDPROC
14534 -END(kernel_thread_helper)
14535 +ENDPROC(kernel_thread_helper)
14536
14537 /*
14538 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14539 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14540 RESTORE_REST
14541 testq %rax,%rax
14542 je int_ret_from_sys_call
14543 - RESTORE_ARGS
14544 UNFAKE_STACK_FRAME
14545 + pax_force_retaddr
14546 ret
14547 CFI_ENDPROC
14548 -END(kernel_execve)
14549 +ENDPROC(kernel_execve)
14550
14551 /* Call softirq on interrupt stack. Interrupts are off. */
14552 ENTRY(call_softirq)
14553 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14554 CFI_DEF_CFA_REGISTER rsp
14555 CFI_ADJUST_CFA_OFFSET -8
14556 decl PER_CPU_VAR(irq_count)
14557 + pax_force_retaddr
14558 ret
14559 CFI_ENDPROC
14560 -END(call_softirq)
14561 +ENDPROC(call_softirq)
14562
14563 #ifdef CONFIG_XEN
14564 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14565 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14566 decl PER_CPU_VAR(irq_count)
14567 jmp error_exit
14568 CFI_ENDPROC
14569 -END(xen_do_hypervisor_callback)
14570 +ENDPROC(xen_do_hypervisor_callback)
14571
14572 /*
14573 * Hypervisor uses this for application faults while it executes.
14574 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14575 SAVE_ALL
14576 jmp error_exit
14577 CFI_ENDPROC
14578 -END(xen_failsafe_callback)
14579 +ENDPROC(xen_failsafe_callback)
14580
14581 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14582 xen_hvm_callback_vector xen_evtchn_do_upcall
14583 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14584 TRACE_IRQS_OFF
14585 testl %ebx,%ebx /* swapgs needed? */
14586 jnz paranoid_restore
14587 - testl $3,CS(%rsp)
14588 + testb $3,CS(%rsp)
14589 jnz paranoid_userspace
14590 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14591 + pax_exit_kernel
14592 + TRACE_IRQS_IRETQ 0
14593 + SWAPGS_UNSAFE_STACK
14594 + RESTORE_ALL 8
14595 + pax_force_retaddr_bts
14596 + jmp irq_return
14597 +#endif
14598 paranoid_swapgs:
14599 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14600 + pax_exit_kernel_user
14601 +#else
14602 + pax_exit_kernel
14603 +#endif
14604 TRACE_IRQS_IRETQ 0
14605 SWAPGS_UNSAFE_STACK
14606 RESTORE_ALL 8
14607 jmp irq_return
14608 paranoid_restore:
14609 + pax_exit_kernel
14610 TRACE_IRQS_IRETQ 0
14611 RESTORE_ALL 8
14612 + pax_force_retaddr_bts
14613 jmp irq_return
14614 paranoid_userspace:
14615 GET_THREAD_INFO(%rcx)
14616 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14617 TRACE_IRQS_OFF
14618 jmp paranoid_userspace
14619 CFI_ENDPROC
14620 -END(paranoid_exit)
14621 +ENDPROC(paranoid_exit)
14622
14623 /*
14624 * Exception entry point. This expects an error code/orig_rax on the stack.
14625 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14626 movq_cfi r14, R14+8
14627 movq_cfi r15, R15+8
14628 xorl %ebx,%ebx
14629 - testl $3,CS+8(%rsp)
14630 + testb $3,CS+8(%rsp)
14631 je error_kernelspace
14632 error_swapgs:
14633 SWAPGS
14634 error_sti:
14635 TRACE_IRQS_OFF
14636 + pax_force_retaddr_bts
14637 ret
14638
14639 /*
14640 @@ -1453,7 +1840,7 @@ bstep_iret:
14641 movq %rcx,RIP+8(%rsp)
14642 jmp error_swapgs
14643 CFI_ENDPROC
14644 -END(error_entry)
14645 +ENDPROC(error_entry)
14646
14647
14648 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14649 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14650 jnz retint_careful
14651 jmp retint_swapgs
14652 CFI_ENDPROC
14653 -END(error_exit)
14654 +ENDPROC(error_exit)
14655
14656
14657 /* runs on exception stack */
14658 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14659 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14660 call save_paranoid
14661 DEFAULT_FRAME 0
14662 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14663 + testb $3, CS(%rsp)
14664 + jnz 1f
14665 + pax_enter_kernel
14666 + jmp 2f
14667 +1: pax_enter_kernel_user
14668 +2:
14669 +#else
14670 + pax_enter_kernel
14671 +#endif
14672 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14673 movq %rsp,%rdi
14674 movq $-1,%rsi
14675 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14676 DISABLE_INTERRUPTS(CLBR_NONE)
14677 testl %ebx,%ebx /* swapgs needed? */
14678 jnz nmi_restore
14679 - testl $3,CS(%rsp)
14680 + testb $3,CS(%rsp)
14681 jnz nmi_userspace
14682 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14683 + pax_exit_kernel
14684 + SWAPGS_UNSAFE_STACK
14685 + RESTORE_ALL 8
14686 + pax_force_retaddr_bts
14687 + jmp irq_return
14688 +#endif
14689 nmi_swapgs:
14690 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14691 + pax_exit_kernel_user
14692 +#else
14693 + pax_exit_kernel
14694 +#endif
14695 SWAPGS_UNSAFE_STACK
14696 + RESTORE_ALL 8
14697 + jmp irq_return
14698 nmi_restore:
14699 + pax_exit_kernel
14700 RESTORE_ALL 8
14701 + pax_force_retaddr_bts
14702 jmp irq_return
14703 nmi_userspace:
14704 GET_THREAD_INFO(%rcx)
14705 @@ -1529,14 +1942,14 @@ nmi_schedule:
14706 jmp paranoid_exit
14707 CFI_ENDPROC
14708 #endif
14709 -END(nmi)
14710 +ENDPROC(nmi)
14711
14712 ENTRY(ignore_sysret)
14713 CFI_STARTPROC
14714 mov $-ENOSYS,%eax
14715 sysret
14716 CFI_ENDPROC
14717 -END(ignore_sysret)
14718 +ENDPROC(ignore_sysret)
14719
14720 /*
14721 * End of kprobes section
14722 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14723 index c9a281f..ce2f317 100644
14724 --- a/arch/x86/kernel/ftrace.c
14725 +++ b/arch/x86/kernel/ftrace.c
14726 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14727 static const void *mod_code_newcode; /* holds the text to write to the IP */
14728
14729 static unsigned nmi_wait_count;
14730 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14731 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14732
14733 int ftrace_arch_read_dyn_info(char *buf, int size)
14734 {
14735 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14736
14737 r = snprintf(buf, size, "%u %u",
14738 nmi_wait_count,
14739 - atomic_read(&nmi_update_count));
14740 + atomic_read_unchecked(&nmi_update_count));
14741 return r;
14742 }
14743
14744 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14745
14746 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14747 smp_rmb();
14748 + pax_open_kernel();
14749 ftrace_mod_code();
14750 - atomic_inc(&nmi_update_count);
14751 + pax_close_kernel();
14752 + atomic_inc_unchecked(&nmi_update_count);
14753 }
14754 /* Must have previous changes seen before executions */
14755 smp_mb();
14756 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14757 {
14758 unsigned char replaced[MCOUNT_INSN_SIZE];
14759
14760 + ip = ktla_ktva(ip);
14761 +
14762 /*
14763 * Note: Due to modules and __init, code can
14764 * disappear and change, we need to protect against faulting
14765 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14766 unsigned char old[MCOUNT_INSN_SIZE], *new;
14767 int ret;
14768
14769 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14770 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14771 new = ftrace_call_replace(ip, (unsigned long)func);
14772 ret = ftrace_modify_code(ip, old, new);
14773
14774 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14775 {
14776 unsigned char code[MCOUNT_INSN_SIZE];
14777
14778 + ip = ktla_ktva(ip);
14779 +
14780 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14781 return -EFAULT;
14782
14783 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14784 index 3bb0850..55a56f4 100644
14785 --- a/arch/x86/kernel/head32.c
14786 +++ b/arch/x86/kernel/head32.c
14787 @@ -19,6 +19,7 @@
14788 #include <asm/io_apic.h>
14789 #include <asm/bios_ebda.h>
14790 #include <asm/tlbflush.h>
14791 +#include <asm/boot.h>
14792
14793 static void __init i386_default_early_setup(void)
14794 {
14795 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14796 {
14797 memblock_init();
14798
14799 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14800 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14801
14802 #ifdef CONFIG_BLK_DEV_INITRD
14803 /* Reserve INITRD */
14804 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14805 index ce0be7c..c41476e 100644
14806 --- a/arch/x86/kernel/head_32.S
14807 +++ b/arch/x86/kernel/head_32.S
14808 @@ -25,6 +25,12 @@
14809 /* Physical address */
14810 #define pa(X) ((X) - __PAGE_OFFSET)
14811
14812 +#ifdef CONFIG_PAX_KERNEXEC
14813 +#define ta(X) (X)
14814 +#else
14815 +#define ta(X) ((X) - __PAGE_OFFSET)
14816 +#endif
14817 +
14818 /*
14819 * References to members of the new_cpu_data structure.
14820 */
14821 @@ -54,11 +60,7 @@
14822 * and small than max_low_pfn, otherwise will waste some page table entries
14823 */
14824
14825 -#if PTRS_PER_PMD > 1
14826 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14827 -#else
14828 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14829 -#endif
14830 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14831
14832 /* Number of possible pages in the lowmem region */
14833 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14834 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14835 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14836
14837 /*
14838 + * Real beginning of normal "text" segment
14839 + */
14840 +ENTRY(stext)
14841 +ENTRY(_stext)
14842 +
14843 +/*
14844 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14845 * %esi points to the real-mode code as a 32-bit pointer.
14846 * CS and DS must be 4 GB flat segments, but we don't depend on
14847 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14848 * can.
14849 */
14850 __HEAD
14851 +
14852 +#ifdef CONFIG_PAX_KERNEXEC
14853 + jmp startup_32
14854 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14855 +.fill PAGE_SIZE-5,1,0xcc
14856 +#endif
14857 +
14858 ENTRY(startup_32)
14859 movl pa(stack_start),%ecx
14860
14861 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14862 2:
14863 leal -__PAGE_OFFSET(%ecx),%esp
14864
14865 +#ifdef CONFIG_SMP
14866 + movl $pa(cpu_gdt_table),%edi
14867 + movl $__per_cpu_load,%eax
14868 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14869 + rorl $16,%eax
14870 + movb %al,__KERNEL_PERCPU + 4(%edi)
14871 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14872 + movl $__per_cpu_end - 1,%eax
14873 + subl $__per_cpu_start,%eax
14874 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14875 +#endif
14876 +
14877 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14878 + movl $NR_CPUS,%ecx
14879 + movl $pa(cpu_gdt_table),%edi
14880 +1:
14881 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14882 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14883 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14884 + addl $PAGE_SIZE_asm,%edi
14885 + loop 1b
14886 +#endif
14887 +
14888 +#ifdef CONFIG_PAX_KERNEXEC
14889 + movl $pa(boot_gdt),%edi
14890 + movl $__LOAD_PHYSICAL_ADDR,%eax
14891 + movw %ax,__BOOT_CS + 2(%edi)
14892 + rorl $16,%eax
14893 + movb %al,__BOOT_CS + 4(%edi)
14894 + movb %ah,__BOOT_CS + 7(%edi)
14895 + rorl $16,%eax
14896 +
14897 + ljmp $(__BOOT_CS),$1f
14898 +1:
14899 +
14900 + movl $NR_CPUS,%ecx
14901 + movl $pa(cpu_gdt_table),%edi
14902 + addl $__PAGE_OFFSET,%eax
14903 +1:
14904 + movw %ax,__KERNEL_CS + 2(%edi)
14905 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14906 + rorl $16,%eax
14907 + movb %al,__KERNEL_CS + 4(%edi)
14908 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14909 + movb %ah,__KERNEL_CS + 7(%edi)
14910 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14911 + rorl $16,%eax
14912 + addl $PAGE_SIZE_asm,%edi
14913 + loop 1b
14914 +#endif
14915 +
14916 /*
14917 * Clear BSS first so that there are no surprises...
14918 */
14919 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14920 movl %eax, pa(max_pfn_mapped)
14921
14922 /* Do early initialization of the fixmap area */
14923 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14924 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14925 +#ifdef CONFIG_COMPAT_VDSO
14926 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14927 +#else
14928 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14929 +#endif
14930 #else /* Not PAE */
14931
14932 page_pde_offset = (__PAGE_OFFSET >> 20);
14933 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14934 movl %eax, pa(max_pfn_mapped)
14935
14936 /* Do early initialization of the fixmap area */
14937 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14938 - movl %eax,pa(initial_page_table+0xffc)
14939 +#ifdef CONFIG_COMPAT_VDSO
14940 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14941 +#else
14942 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14943 +#endif
14944 #endif
14945
14946 #ifdef CONFIG_PARAVIRT
14947 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14948 cmpl $num_subarch_entries, %eax
14949 jae bad_subarch
14950
14951 - movl pa(subarch_entries)(,%eax,4), %eax
14952 - subl $__PAGE_OFFSET, %eax
14953 - jmp *%eax
14954 + jmp *pa(subarch_entries)(,%eax,4)
14955
14956 bad_subarch:
14957 WEAK(lguest_entry)
14958 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14959 __INITDATA
14960
14961 subarch_entries:
14962 - .long default_entry /* normal x86/PC */
14963 - .long lguest_entry /* lguest hypervisor */
14964 - .long xen_entry /* Xen hypervisor */
14965 - .long default_entry /* Moorestown MID */
14966 + .long ta(default_entry) /* normal x86/PC */
14967 + .long ta(lguest_entry) /* lguest hypervisor */
14968 + .long ta(xen_entry) /* Xen hypervisor */
14969 + .long ta(default_entry) /* Moorestown MID */
14970 num_subarch_entries = (. - subarch_entries) / 4
14971 .previous
14972 #else
14973 @@ -312,6 +382,7 @@ default_entry:
14974 orl %edx,%eax
14975 movl %eax,%cr4
14976
14977 +#ifdef CONFIG_X86_PAE
14978 testb $X86_CR4_PAE, %al # check if PAE is enabled
14979 jz 6f
14980
14981 @@ -340,6 +411,9 @@ default_entry:
14982 /* Make changes effective */
14983 wrmsr
14984
14985 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14986 +#endif
14987 +
14988 6:
14989
14990 /*
14991 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14992 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14993 movl %eax,%ss # after changing gdt.
14994
14995 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14996 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14997 movl %eax,%ds
14998 movl %eax,%es
14999
15000 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
15001 */
15002 cmpb $0,ready
15003 jne 1f
15004 - movl $gdt_page,%eax
15005 + movl $cpu_gdt_table,%eax
15006 movl $stack_canary,%ecx
15007 +#ifdef CONFIG_SMP
15008 + addl $__per_cpu_load,%ecx
15009 +#endif
15010 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
15011 shrl $16, %ecx
15012 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
15013 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
15014 1:
15015 -#endif
15016 movl $(__KERNEL_STACK_CANARY),%eax
15017 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15018 + movl $(__USER_DS),%eax
15019 +#else
15020 + xorl %eax,%eax
15021 +#endif
15022 movl %eax,%gs
15023
15024 xorl %eax,%eax # Clear LDT
15025 @@ -558,22 +639,22 @@ early_page_fault:
15026 jmp early_fault
15027
15028 early_fault:
15029 - cld
15030 #ifdef CONFIG_PRINTK
15031 + cmpl $1,%ss:early_recursion_flag
15032 + je hlt_loop
15033 + incl %ss:early_recursion_flag
15034 + cld
15035 pusha
15036 movl $(__KERNEL_DS),%eax
15037 movl %eax,%ds
15038 movl %eax,%es
15039 - cmpl $2,early_recursion_flag
15040 - je hlt_loop
15041 - incl early_recursion_flag
15042 movl %cr2,%eax
15043 pushl %eax
15044 pushl %edx /* trapno */
15045 pushl $fault_msg
15046 call printk
15047 +; call dump_stack
15048 #endif
15049 - call dump_stack
15050 hlt_loop:
15051 hlt
15052 jmp hlt_loop
15053 @@ -581,8 +662,11 @@ hlt_loop:
15054 /* This is the default interrupt "handler" :-) */
15055 ALIGN
15056 ignore_int:
15057 - cld
15058 #ifdef CONFIG_PRINTK
15059 + cmpl $2,%ss:early_recursion_flag
15060 + je hlt_loop
15061 + incl %ss:early_recursion_flag
15062 + cld
15063 pushl %eax
15064 pushl %ecx
15065 pushl %edx
15066 @@ -591,9 +675,6 @@ ignore_int:
15067 movl $(__KERNEL_DS),%eax
15068 movl %eax,%ds
15069 movl %eax,%es
15070 - cmpl $2,early_recursion_flag
15071 - je hlt_loop
15072 - incl early_recursion_flag
15073 pushl 16(%esp)
15074 pushl 24(%esp)
15075 pushl 32(%esp)
15076 @@ -622,29 +703,43 @@ ENTRY(initial_code)
15077 /*
15078 * BSS section
15079 */
15080 -__PAGE_ALIGNED_BSS
15081 - .align PAGE_SIZE
15082 #ifdef CONFIG_X86_PAE
15083 +.section .initial_pg_pmd,"a",@progbits
15084 initial_pg_pmd:
15085 .fill 1024*KPMDS,4,0
15086 #else
15087 +.section .initial_page_table,"a",@progbits
15088 ENTRY(initial_page_table)
15089 .fill 1024,4,0
15090 #endif
15091 +.section .initial_pg_fixmap,"a",@progbits
15092 initial_pg_fixmap:
15093 .fill 1024,4,0
15094 +.section .empty_zero_page,"a",@progbits
15095 ENTRY(empty_zero_page)
15096 .fill 4096,1,0
15097 +.section .swapper_pg_dir,"a",@progbits
15098 ENTRY(swapper_pg_dir)
15099 +#ifdef CONFIG_X86_PAE
15100 + .fill 4,8,0
15101 +#else
15102 .fill 1024,4,0
15103 +#endif
15104 +
15105 +/*
15106 + * The IDT has to be page-aligned to simplify the Pentium
15107 + * F0 0F bug workaround.. We have a special link segment
15108 + * for this.
15109 + */
15110 +.section .idt,"a",@progbits
15111 +ENTRY(idt_table)
15112 + .fill 256,8,0
15113
15114 /*
15115 * This starts the data section.
15116 */
15117 #ifdef CONFIG_X86_PAE
15118 -__PAGE_ALIGNED_DATA
15119 - /* Page-aligned for the benefit of paravirt? */
15120 - .align PAGE_SIZE
15121 +.section .initial_page_table,"a",@progbits
15122 ENTRY(initial_page_table)
15123 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
15124 # if KPMDS == 3
15125 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
15126 # error "Kernel PMDs should be 1, 2 or 3"
15127 # endif
15128 .align PAGE_SIZE /* needs to be page-sized too */
15129 +
15130 +#ifdef CONFIG_PAX_PER_CPU_PGD
15131 +ENTRY(cpu_pgd)
15132 + .rept NR_CPUS
15133 + .fill 4,8,0
15134 + .endr
15135 +#endif
15136 +
15137 #endif
15138
15139 .data
15140 .balign 4
15141 ENTRY(stack_start)
15142 - .long init_thread_union+THREAD_SIZE
15143 + .long init_thread_union+THREAD_SIZE-8
15144
15145 +ready: .byte 0
15146 +
15147 +.section .rodata,"a",@progbits
15148 early_recursion_flag:
15149 .long 0
15150
15151 -ready: .byte 0
15152 -
15153 int_msg:
15154 .asciz "Unknown interrupt or fault at: %p %p %p\n"
15155
15156 @@ -707,7 +811,7 @@ fault_msg:
15157 .word 0 # 32 bit align gdt_desc.address
15158 boot_gdt_descr:
15159 .word __BOOT_DS+7
15160 - .long boot_gdt - __PAGE_OFFSET
15161 + .long pa(boot_gdt)
15162
15163 .word 0 # 32-bit align idt_desc.address
15164 idt_descr:
15165 @@ -718,7 +822,7 @@ idt_descr:
15166 .word 0 # 32 bit align gdt_desc.address
15167 ENTRY(early_gdt_descr)
15168 .word GDT_ENTRIES*8-1
15169 - .long gdt_page /* Overwritten for secondary CPUs */
15170 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
15171
15172 /*
15173 * The boot_gdt must mirror the equivalent in setup.S and is
15174 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
15175 .align L1_CACHE_BYTES
15176 ENTRY(boot_gdt)
15177 .fill GDT_ENTRY_BOOT_CS,8,0
15178 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
15179 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
15180 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
15181 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
15182 +
15183 + .align PAGE_SIZE_asm
15184 +ENTRY(cpu_gdt_table)
15185 + .rept NR_CPUS
15186 + .quad 0x0000000000000000 /* NULL descriptor */
15187 + .quad 0x0000000000000000 /* 0x0b reserved */
15188 + .quad 0x0000000000000000 /* 0x13 reserved */
15189 + .quad 0x0000000000000000 /* 0x1b reserved */
15190 +
15191 +#ifdef CONFIG_PAX_KERNEXEC
15192 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
15193 +#else
15194 + .quad 0x0000000000000000 /* 0x20 unused */
15195 +#endif
15196 +
15197 + .quad 0x0000000000000000 /* 0x28 unused */
15198 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
15199 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
15200 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
15201 + .quad 0x0000000000000000 /* 0x4b reserved */
15202 + .quad 0x0000000000000000 /* 0x53 reserved */
15203 + .quad 0x0000000000000000 /* 0x5b reserved */
15204 +
15205 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
15206 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
15207 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
15208 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
15209 +
15210 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
15211 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
15212 +
15213 + /*
15214 + * Segments used for calling PnP BIOS have byte granularity.
15215 + * The code segments and data segments have fixed 64k limits,
15216 + * the transfer segment sizes are set at run time.
15217 + */
15218 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
15219 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
15220 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
15221 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
15222 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
15223 +
15224 + /*
15225 + * The APM segments have byte granularity and their bases
15226 + * are set at run time. All have 64k limits.
15227 + */
15228 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
15229 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
15230 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
15231 +
15232 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
15233 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15234 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
15235 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
15236 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
15237 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
15238 +
15239 + /* Be sure this is zeroed to avoid false validations in Xen */
15240 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
15241 + .endr
15242 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15243 index e11e394..9aebc5d 100644
15244 --- a/arch/x86/kernel/head_64.S
15245 +++ b/arch/x86/kernel/head_64.S
15246 @@ -19,6 +19,8 @@
15247 #include <asm/cache.h>
15248 #include <asm/processor-flags.h>
15249 #include <asm/percpu.h>
15250 +#include <asm/cpufeature.h>
15251 +#include <asm/alternative-asm.h>
15252
15253 #ifdef CONFIG_PARAVIRT
15254 #include <asm/asm-offsets.h>
15255 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15256 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15257 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15258 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15259 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15260 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15261 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15262 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15263 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15264 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15265
15266 .text
15267 __HEAD
15268 @@ -85,35 +93,23 @@ startup_64:
15269 */
15270 addq %rbp, init_level4_pgt + 0(%rip)
15271 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15272 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15273 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15274 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15275 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15276
15277 addq %rbp, level3_ident_pgt + 0(%rip)
15278 +#ifndef CONFIG_XEN
15279 + addq %rbp, level3_ident_pgt + 8(%rip)
15280 +#endif
15281
15282 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15283 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15284 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15285 +
15286 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15287 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15288
15289 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15290 -
15291 - /* Add an Identity mapping if I am above 1G */
15292 - leaq _text(%rip), %rdi
15293 - andq $PMD_PAGE_MASK, %rdi
15294 -
15295 - movq %rdi, %rax
15296 - shrq $PUD_SHIFT, %rax
15297 - andq $(PTRS_PER_PUD - 1), %rax
15298 - jz ident_complete
15299 -
15300 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15301 - leaq level3_ident_pgt(%rip), %rbx
15302 - movq %rdx, 0(%rbx, %rax, 8)
15303 -
15304 - movq %rdi, %rax
15305 - shrq $PMD_SHIFT, %rax
15306 - andq $(PTRS_PER_PMD - 1), %rax
15307 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15308 - leaq level2_spare_pgt(%rip), %rbx
15309 - movq %rdx, 0(%rbx, %rax, 8)
15310 -ident_complete:
15311 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15312
15313 /*
15314 * Fixup the kernel text+data virtual addresses. Note that
15315 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15316 * after the boot processor executes this code.
15317 */
15318
15319 - /* Enable PAE mode and PGE */
15320 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15321 + /* Enable PAE mode and PSE/PGE */
15322 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15323 movq %rax, %cr4
15324
15325 /* Setup early boot stage 4 level pagetables. */
15326 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15327 movl $MSR_EFER, %ecx
15328 rdmsr
15329 btsl $_EFER_SCE, %eax /* Enable System Call */
15330 - btl $20,%edi /* No Execute supported? */
15331 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15332 jnc 1f
15333 btsl $_EFER_NX, %eax
15334 + leaq init_level4_pgt(%rip), %rdi
15335 +#ifndef CONFIG_EFI
15336 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15337 +#endif
15338 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15339 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15340 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15341 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15342 1: wrmsr /* Make changes effective */
15343
15344 /* Setup cr0 */
15345 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15346 * jump. In addition we need to ensure %cs is set so we make this
15347 * a far return.
15348 */
15349 + pax_set_fptr_mask
15350 movq initial_code(%rip),%rax
15351 pushq $0 # fake return address to stop unwinder
15352 pushq $__KERNEL_CS # set correct cs
15353 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15354 bad_address:
15355 jmp bad_address
15356
15357 - .section ".init.text","ax"
15358 + __INIT
15359 #ifdef CONFIG_EARLY_PRINTK
15360 .globl early_idt_handlers
15361 early_idt_handlers:
15362 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15363 #endif /* EARLY_PRINTK */
15364 1: hlt
15365 jmp 1b
15366 + .previous
15367
15368 #ifdef CONFIG_EARLY_PRINTK
15369 + __INITDATA
15370 early_recursion_flag:
15371 .long 0
15372 + .previous
15373
15374 + .section .rodata,"a",@progbits
15375 early_idt_msg:
15376 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15377 early_idt_ripmsg:
15378 .asciz "RIP %s\n"
15379 + .previous
15380 #endif /* CONFIG_EARLY_PRINTK */
15381 - .previous
15382
15383 + .section .rodata,"a",@progbits
15384 #define NEXT_PAGE(name) \
15385 .balign PAGE_SIZE; \
15386 ENTRY(name)
15387 @@ -338,7 +348,6 @@ ENTRY(name)
15388 i = i + 1 ; \
15389 .endr
15390
15391 - .data
15392 /*
15393 * This default setting generates an ident mapping at address 0x100000
15394 * and a mapping for the kernel that precisely maps virtual address
15395 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15396 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15397 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15398 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15399 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15400 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15401 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15402 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15403 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15404 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15405 .org init_level4_pgt + L4_START_KERNEL*8, 0
15406 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15407 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15408
15409 +#ifdef CONFIG_PAX_PER_CPU_PGD
15410 +NEXT_PAGE(cpu_pgd)
15411 + .rept NR_CPUS
15412 + .fill 512,8,0
15413 + .endr
15414 +#endif
15415 +
15416 NEXT_PAGE(level3_ident_pgt)
15417 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15418 +#ifdef CONFIG_XEN
15419 .fill 511,8,0
15420 +#else
15421 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15422 + .fill 510,8,0
15423 +#endif
15424 +
15425 +NEXT_PAGE(level3_vmalloc_start_pgt)
15426 + .fill 512,8,0
15427 +
15428 +NEXT_PAGE(level3_vmalloc_end_pgt)
15429 + .fill 512,8,0
15430 +
15431 +NEXT_PAGE(level3_vmemmap_pgt)
15432 + .fill L3_VMEMMAP_START,8,0
15433 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15434
15435 NEXT_PAGE(level3_kernel_pgt)
15436 .fill L3_START_KERNEL,8,0
15437 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15438 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15439 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15440
15441 +NEXT_PAGE(level2_vmemmap_pgt)
15442 + .fill 512,8,0
15443 +
15444 NEXT_PAGE(level2_fixmap_pgt)
15445 - .fill 506,8,0
15446 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15447 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15448 - .fill 5,8,0
15449 + .fill 507,8,0
15450 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15451 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15452 + .fill 4,8,0
15453
15454 -NEXT_PAGE(level1_fixmap_pgt)
15455 +NEXT_PAGE(level1_vsyscall_pgt)
15456 .fill 512,8,0
15457
15458 -NEXT_PAGE(level2_ident_pgt)
15459 - /* Since I easily can, map the first 1G.
15460 + /* Since I easily can, map the first 2G.
15461 * Don't set NX because code runs from these pages.
15462 */
15463 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15464 +NEXT_PAGE(level2_ident_pgt)
15465 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15466
15467 NEXT_PAGE(level2_kernel_pgt)
15468 /*
15469 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15470 * If you want to increase this then increase MODULES_VADDR
15471 * too.)
15472 */
15473 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15474 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15475 -
15476 -NEXT_PAGE(level2_spare_pgt)
15477 - .fill 512, 8, 0
15478 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15479
15480 #undef PMDS
15481 #undef NEXT_PAGE
15482
15483 - .data
15484 + .align PAGE_SIZE
15485 +ENTRY(cpu_gdt_table)
15486 + .rept NR_CPUS
15487 + .quad 0x0000000000000000 /* NULL descriptor */
15488 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15489 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15490 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15491 + .quad 0x00cffb000000ffff /* __USER32_CS */
15492 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15493 + .quad 0x00affb000000ffff /* __USER_CS */
15494 +
15495 +#ifdef CONFIG_PAX_KERNEXEC
15496 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15497 +#else
15498 + .quad 0x0 /* unused */
15499 +#endif
15500 +
15501 + .quad 0,0 /* TSS */
15502 + .quad 0,0 /* LDT */
15503 + .quad 0,0,0 /* three TLS descriptors */
15504 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15505 + /* asm/segment.h:GDT_ENTRIES must match this */
15506 +
15507 + /* zero the remaining page */
15508 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15509 + .endr
15510 +
15511 .align 16
15512 .globl early_gdt_descr
15513 early_gdt_descr:
15514 .word GDT_ENTRIES*8-1
15515 early_gdt_descr_base:
15516 - .quad INIT_PER_CPU_VAR(gdt_page)
15517 + .quad cpu_gdt_table
15518
15519 ENTRY(phys_base)
15520 /* This must match the first entry in level2_kernel_pgt */
15521 .quad 0x0000000000000000
15522
15523 #include "../../x86/xen/xen-head.S"
15524 -
15525 - .section .bss, "aw", @nobits
15526 +
15527 + .section .rodata,"a",@progbits
15528 .align L1_CACHE_BYTES
15529 ENTRY(idt_table)
15530 - .skip IDT_ENTRIES * 16
15531 + .fill 512,8,0
15532
15533 __PAGE_ALIGNED_BSS
15534 .align PAGE_SIZE
15535 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15536 index 9c3bd4a..e1d9b35 100644
15537 --- a/arch/x86/kernel/i386_ksyms_32.c
15538 +++ b/arch/x86/kernel/i386_ksyms_32.c
15539 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15540 EXPORT_SYMBOL(cmpxchg8b_emu);
15541 #endif
15542
15543 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15544 +
15545 /* Networking helper routines. */
15546 EXPORT_SYMBOL(csum_partial_copy_generic);
15547 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15548 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15549
15550 EXPORT_SYMBOL(__get_user_1);
15551 EXPORT_SYMBOL(__get_user_2);
15552 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15553
15554 EXPORT_SYMBOL(csum_partial);
15555 EXPORT_SYMBOL(empty_zero_page);
15556 +
15557 +#ifdef CONFIG_PAX_KERNEXEC
15558 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15559 +#endif
15560 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15561 index 6104852..6114160 100644
15562 --- a/arch/x86/kernel/i8259.c
15563 +++ b/arch/x86/kernel/i8259.c
15564 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15565 "spurious 8259A interrupt: IRQ%d.\n", irq);
15566 spurious_irq_mask |= irqmask;
15567 }
15568 - atomic_inc(&irq_err_count);
15569 + atomic_inc_unchecked(&irq_err_count);
15570 /*
15571 * Theoretically we do not have to handle this IRQ,
15572 * but in Linux this does not cause problems and is
15573 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15574 index 43e9ccf..44ccf6f 100644
15575 --- a/arch/x86/kernel/init_task.c
15576 +++ b/arch/x86/kernel/init_task.c
15577 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15578 * way process stacks are handled. This is done by having a special
15579 * "init_task" linker map entry..
15580 */
15581 -union thread_union init_thread_union __init_task_data =
15582 - { INIT_THREAD_INFO(init_task) };
15583 +union thread_union init_thread_union __init_task_data;
15584
15585 /*
15586 * Initial task structure.
15587 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15588 * section. Since TSS's are completely CPU-local, we want them
15589 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15590 */
15591 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15592 -
15593 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15594 +EXPORT_SYMBOL(init_tss);
15595 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15596 index 8c96897..be66bfa 100644
15597 --- a/arch/x86/kernel/ioport.c
15598 +++ b/arch/x86/kernel/ioport.c
15599 @@ -6,6 +6,7 @@
15600 #include <linux/sched.h>
15601 #include <linux/kernel.h>
15602 #include <linux/capability.h>
15603 +#include <linux/security.h>
15604 #include <linux/errno.h>
15605 #include <linux/types.h>
15606 #include <linux/ioport.h>
15607 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15608
15609 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15610 return -EINVAL;
15611 +#ifdef CONFIG_GRKERNSEC_IO
15612 + if (turn_on && grsec_disable_privio) {
15613 + gr_handle_ioperm();
15614 + return -EPERM;
15615 + }
15616 +#endif
15617 if (turn_on && !capable(CAP_SYS_RAWIO))
15618 return -EPERM;
15619
15620 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15621 * because the ->io_bitmap_max value must match the bitmap
15622 * contents:
15623 */
15624 - tss = &per_cpu(init_tss, get_cpu());
15625 + tss = init_tss + get_cpu();
15626
15627 if (turn_on)
15628 bitmap_clear(t->io_bitmap_ptr, from, num);
15629 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15630 return -EINVAL;
15631 /* Trying to gain more privileges? */
15632 if (level > old) {
15633 +#ifdef CONFIG_GRKERNSEC_IO
15634 + if (grsec_disable_privio) {
15635 + gr_handle_iopl();
15636 + return -EPERM;
15637 + }
15638 +#endif
15639 if (!capable(CAP_SYS_RAWIO))
15640 return -EPERM;
15641 }
15642 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15643 index 429e0c9..17b3ece 100644
15644 --- a/arch/x86/kernel/irq.c
15645 +++ b/arch/x86/kernel/irq.c
15646 @@ -18,7 +18,7 @@
15647 #include <asm/mce.h>
15648 #include <asm/hw_irq.h>
15649
15650 -atomic_t irq_err_count;
15651 +atomic_unchecked_t irq_err_count;
15652
15653 /* Function pointer for generic interrupt vector handling */
15654 void (*x86_platform_ipi_callback)(void) = NULL;
15655 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15656 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15657 seq_printf(p, " Machine check polls\n");
15658 #endif
15659 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15660 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15661 #if defined(CONFIG_X86_IO_APIC)
15662 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15663 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15664 #endif
15665 return 0;
15666 }
15667 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15668
15669 u64 arch_irq_stat(void)
15670 {
15671 - u64 sum = atomic_read(&irq_err_count);
15672 + u64 sum = atomic_read_unchecked(&irq_err_count);
15673
15674 #ifdef CONFIG_X86_IO_APIC
15675 - sum += atomic_read(&irq_mis_count);
15676 + sum += atomic_read_unchecked(&irq_mis_count);
15677 #endif
15678 return sum;
15679 }
15680 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15681 index 7209070..cbcd71a 100644
15682 --- a/arch/x86/kernel/irq_32.c
15683 +++ b/arch/x86/kernel/irq_32.c
15684 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15685 __asm__ __volatile__("andl %%esp,%0" :
15686 "=r" (sp) : "0" (THREAD_SIZE - 1));
15687
15688 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15689 + return sp < STACK_WARN;
15690 }
15691
15692 static void print_stack_overflow(void)
15693 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15694 * per-CPU IRQ handling contexts (thread information and stack)
15695 */
15696 union irq_ctx {
15697 - struct thread_info tinfo;
15698 - u32 stack[THREAD_SIZE/sizeof(u32)];
15699 + unsigned long previous_esp;
15700 + u32 stack[THREAD_SIZE/sizeof(u32)];
15701 } __attribute__((aligned(THREAD_SIZE)));
15702
15703 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15704 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15705 static inline int
15706 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15707 {
15708 - union irq_ctx *curctx, *irqctx;
15709 + union irq_ctx *irqctx;
15710 u32 *isp, arg1, arg2;
15711
15712 - curctx = (union irq_ctx *) current_thread_info();
15713 irqctx = __this_cpu_read(hardirq_ctx);
15714
15715 /*
15716 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15717 * handler) we can't do that and just have to keep using the
15718 * current stack (which is the irq stack already after all)
15719 */
15720 - if (unlikely(curctx == irqctx))
15721 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15722 return 0;
15723
15724 /* build the stack frame on the IRQ stack */
15725 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15726 - irqctx->tinfo.task = curctx->tinfo.task;
15727 - irqctx->tinfo.previous_esp = current_stack_pointer;
15728 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15729 + irqctx->previous_esp = current_stack_pointer;
15730
15731 - /*
15732 - * Copy the softirq bits in preempt_count so that the
15733 - * softirq checks work in the hardirq context.
15734 - */
15735 - irqctx->tinfo.preempt_count =
15736 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15737 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15739 + __set_fs(MAKE_MM_SEG(0));
15740 +#endif
15741
15742 if (unlikely(overflow))
15743 call_on_stack(print_stack_overflow, isp);
15744 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15745 : "0" (irq), "1" (desc), "2" (isp),
15746 "D" (desc->handle_irq)
15747 : "memory", "cc", "ecx");
15748 +
15749 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15750 + __set_fs(current_thread_info()->addr_limit);
15751 +#endif
15752 +
15753 return 1;
15754 }
15755
15756 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15757 */
15758 void __cpuinit irq_ctx_init(int cpu)
15759 {
15760 - union irq_ctx *irqctx;
15761 -
15762 if (per_cpu(hardirq_ctx, cpu))
15763 return;
15764
15765 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15766 - THREAD_FLAGS,
15767 - THREAD_ORDER));
15768 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15769 - irqctx->tinfo.cpu = cpu;
15770 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15771 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15772 -
15773 - per_cpu(hardirq_ctx, cpu) = irqctx;
15774 -
15775 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15776 - THREAD_FLAGS,
15777 - THREAD_ORDER));
15778 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15779 - irqctx->tinfo.cpu = cpu;
15780 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15781 -
15782 - per_cpu(softirq_ctx, cpu) = irqctx;
15783 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15784 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15785
15786 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15787 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15788 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15789 asmlinkage void do_softirq(void)
15790 {
15791 unsigned long flags;
15792 - struct thread_info *curctx;
15793 union irq_ctx *irqctx;
15794 u32 *isp;
15795
15796 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15797 local_irq_save(flags);
15798
15799 if (local_softirq_pending()) {
15800 - curctx = current_thread_info();
15801 irqctx = __this_cpu_read(softirq_ctx);
15802 - irqctx->tinfo.task = curctx->task;
15803 - irqctx->tinfo.previous_esp = current_stack_pointer;
15804 + irqctx->previous_esp = current_stack_pointer;
15805
15806 /* build the stack frame on the softirq stack */
15807 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15808 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15809 +
15810 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15811 + __set_fs(MAKE_MM_SEG(0));
15812 +#endif
15813
15814 call_on_stack(__do_softirq, isp);
15815 +
15816 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15817 + __set_fs(current_thread_info()->addr_limit);
15818 +#endif
15819 +
15820 /*
15821 * Shouldn't happen, we returned above if in_interrupt():
15822 */
15823 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15824 index 69bca46..0bac999 100644
15825 --- a/arch/x86/kernel/irq_64.c
15826 +++ b/arch/x86/kernel/irq_64.c
15827 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15828 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15829 u64 curbase = (u64)task_stack_page(current);
15830
15831 - if (user_mode_vm(regs))
15832 + if (user_mode(regs))
15833 return;
15834
15835 WARN_ONCE(regs->sp >= curbase &&
15836 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15837 index faba577..93b9e71 100644
15838 --- a/arch/x86/kernel/kgdb.c
15839 +++ b/arch/x86/kernel/kgdb.c
15840 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15841 #ifdef CONFIG_X86_32
15842 switch (regno) {
15843 case GDB_SS:
15844 - if (!user_mode_vm(regs))
15845 + if (!user_mode(regs))
15846 *(unsigned long *)mem = __KERNEL_DS;
15847 break;
15848 case GDB_SP:
15849 - if (!user_mode_vm(regs))
15850 + if (!user_mode(regs))
15851 *(unsigned long *)mem = kernel_stack_pointer(regs);
15852 break;
15853 case GDB_GS:
15854 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15855 case 'k':
15856 /* clear the trace bit */
15857 linux_regs->flags &= ~X86_EFLAGS_TF;
15858 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15859 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15860
15861 /* set the trace bit if we're stepping */
15862 if (remcomInBuffer[0] == 's') {
15863 linux_regs->flags |= X86_EFLAGS_TF;
15864 - atomic_set(&kgdb_cpu_doing_single_step,
15865 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15866 raw_smp_processor_id());
15867 }
15868
15869 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15870
15871 switch (cmd) {
15872 case DIE_DEBUG:
15873 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15874 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15875 if (user_mode(regs))
15876 return single_step_cont(regs, args);
15877 break;
15878 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15879 index 7da647d..56fe348 100644
15880 --- a/arch/x86/kernel/kprobes.c
15881 +++ b/arch/x86/kernel/kprobes.c
15882 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15883 } __attribute__((packed)) *insn;
15884
15885 insn = (struct __arch_relative_insn *)from;
15886 +
15887 + pax_open_kernel();
15888 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15889 insn->op = op;
15890 + pax_close_kernel();
15891 }
15892
15893 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15894 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15895 kprobe_opcode_t opcode;
15896 kprobe_opcode_t *orig_opcodes = opcodes;
15897
15898 - if (search_exception_tables((unsigned long)opcodes))
15899 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15900 return 0; /* Page fault may occur on this address. */
15901
15902 retry:
15903 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15904 }
15905 }
15906 insn_get_length(&insn);
15907 + pax_open_kernel();
15908 memcpy(dest, insn.kaddr, insn.length);
15909 + pax_close_kernel();
15910
15911 #ifdef CONFIG_X86_64
15912 if (insn_rip_relative(&insn)) {
15913 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15914 (u8 *) dest;
15915 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15916 disp = (u8 *) dest + insn_offset_displacement(&insn);
15917 + pax_open_kernel();
15918 *(s32 *) disp = (s32) newdisp;
15919 + pax_close_kernel();
15920 }
15921 #endif
15922 return insn.length;
15923 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15924 */
15925 __copy_instruction(p->ainsn.insn, p->addr, 0);
15926
15927 - if (can_boost(p->addr))
15928 + if (can_boost(ktla_ktva(p->addr)))
15929 p->ainsn.boostable = 0;
15930 else
15931 p->ainsn.boostable = -1;
15932
15933 - p->opcode = *p->addr;
15934 + p->opcode = *(ktla_ktva(p->addr));
15935 }
15936
15937 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15938 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15939 * nor set current_kprobe, because it doesn't use single
15940 * stepping.
15941 */
15942 - regs->ip = (unsigned long)p->ainsn.insn;
15943 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15944 preempt_enable_no_resched();
15945 return;
15946 }
15947 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15948 if (p->opcode == BREAKPOINT_INSTRUCTION)
15949 regs->ip = (unsigned long)p->addr;
15950 else
15951 - regs->ip = (unsigned long)p->ainsn.insn;
15952 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15953 }
15954
15955 /*
15956 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15957 setup_singlestep(p, regs, kcb, 0);
15958 return 1;
15959 }
15960 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15961 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15962 /*
15963 * The breakpoint instruction was removed right
15964 * after we hit it. Another cpu has removed
15965 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15966 " movq %rax, 152(%rsp)\n"
15967 RESTORE_REGS_STRING
15968 " popfq\n"
15969 +#ifdef KERNEXEC_PLUGIN
15970 + " btsq $63,(%rsp)\n"
15971 +#endif
15972 #else
15973 " pushf\n"
15974 SAVE_REGS_STRING
15975 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15976 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15977 {
15978 unsigned long *tos = stack_addr(regs);
15979 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15980 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15981 unsigned long orig_ip = (unsigned long)p->addr;
15982 kprobe_opcode_t *insn = p->ainsn.insn;
15983
15984 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15985 struct die_args *args = data;
15986 int ret = NOTIFY_DONE;
15987
15988 - if (args->regs && user_mode_vm(args->regs))
15989 + if (args->regs && user_mode(args->regs))
15990 return ret;
15991
15992 switch (val) {
15993 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15994 * Verify if the address gap is in 2GB range, because this uses
15995 * a relative jump.
15996 */
15997 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15998 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15999 if (abs(rel) > 0x7fffffff)
16000 return -ERANGE;
16001
16002 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
16003 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
16004
16005 /* Set probe function call */
16006 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
16007 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
16008
16009 /* Set returning jmp instruction at the tail of out-of-line buffer */
16010 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
16011 - (u8 *)op->kp.addr + op->optinsn.size);
16012 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
16013
16014 flush_icache_range((unsigned long) buf,
16015 (unsigned long) buf + TMPL_END_IDX +
16016 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
16017 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
16018
16019 /* Backup instructions which will be replaced by jump address */
16020 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
16021 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
16022 RELATIVE_ADDR_SIZE);
16023
16024 insn_buf[0] = RELATIVEJUMP_OPCODE;
16025 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
16026 index a9c2116..a52d4fc 100644
16027 --- a/arch/x86/kernel/kvm.c
16028 +++ b/arch/x86/kernel/kvm.c
16029 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
16030 pv_mmu_ops.set_pud = kvm_set_pud;
16031 #if PAGETABLE_LEVELS == 4
16032 pv_mmu_ops.set_pgd = kvm_set_pgd;
16033 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
16034 #endif
16035 #endif
16036 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
16037 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
16038 index ea69726..604d066 100644
16039 --- a/arch/x86/kernel/ldt.c
16040 +++ b/arch/x86/kernel/ldt.c
16041 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
16042 if (reload) {
16043 #ifdef CONFIG_SMP
16044 preempt_disable();
16045 - load_LDT(pc);
16046 + load_LDT_nolock(pc);
16047 if (!cpumask_equal(mm_cpumask(current->mm),
16048 cpumask_of(smp_processor_id())))
16049 smp_call_function(flush_ldt, current->mm, 1);
16050 preempt_enable();
16051 #else
16052 - load_LDT(pc);
16053 + load_LDT_nolock(pc);
16054 #endif
16055 }
16056 if (oldsize) {
16057 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
16058 return err;
16059
16060 for (i = 0; i < old->size; i++)
16061 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
16062 + write_ldt_entry(new->ldt, i, old->ldt + i);
16063 return 0;
16064 }
16065
16066 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
16067 retval = copy_ldt(&mm->context, &old_mm->context);
16068 mutex_unlock(&old_mm->context.lock);
16069 }
16070 +
16071 + if (tsk == current) {
16072 + mm->context.vdso = 0;
16073 +
16074 +#ifdef CONFIG_X86_32
16075 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16076 + mm->context.user_cs_base = 0UL;
16077 + mm->context.user_cs_limit = ~0UL;
16078 +
16079 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16080 + cpus_clear(mm->context.cpu_user_cs_mask);
16081 +#endif
16082 +
16083 +#endif
16084 +#endif
16085 +
16086 + }
16087 +
16088 return retval;
16089 }
16090
16091 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
16092 }
16093 }
16094
16095 +#ifdef CONFIG_PAX_SEGMEXEC
16096 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
16097 + error = -EINVAL;
16098 + goto out_unlock;
16099 + }
16100 +#endif
16101 +
16102 fill_ldt(&ldt, &ldt_info);
16103 if (oldmode)
16104 ldt.avl = 0;
16105 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
16106 index a3fa43b..8966f4c 100644
16107 --- a/arch/x86/kernel/machine_kexec_32.c
16108 +++ b/arch/x86/kernel/machine_kexec_32.c
16109 @@ -27,7 +27,7 @@
16110 #include <asm/cacheflush.h>
16111 #include <asm/debugreg.h>
16112
16113 -static void set_idt(void *newidt, __u16 limit)
16114 +static void set_idt(struct desc_struct *newidt, __u16 limit)
16115 {
16116 struct desc_ptr curidt;
16117
16118 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
16119 }
16120
16121
16122 -static void set_gdt(void *newgdt, __u16 limit)
16123 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
16124 {
16125 struct desc_ptr curgdt;
16126
16127 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
16128 }
16129
16130 control_page = page_address(image->control_code_page);
16131 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
16132 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
16133
16134 relocate_kernel_ptr = control_page;
16135 page_list[PA_CONTROL_PAGE] = __pa(control_page);
16136 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
16137 index 3ca42d0..7cff8cc 100644
16138 --- a/arch/x86/kernel/microcode_intel.c
16139 +++ b/arch/x86/kernel/microcode_intel.c
16140 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
16141
16142 static int get_ucode_user(void *to, const void *from, size_t n)
16143 {
16144 - return copy_from_user(to, from, n);
16145 + return copy_from_user(to, (const void __force_user *)from, n);
16146 }
16147
16148 static enum ucode_state
16149 request_microcode_user(int cpu, const void __user *buf, size_t size)
16150 {
16151 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
16152 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
16153 }
16154
16155 static void microcode_fini_cpu(int cpu)
16156 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
16157 index 925179f..267ac7a 100644
16158 --- a/arch/x86/kernel/module.c
16159 +++ b/arch/x86/kernel/module.c
16160 @@ -36,15 +36,60 @@
16161 #define DEBUGP(fmt...)
16162 #endif
16163
16164 -void *module_alloc(unsigned long size)
16165 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
16166 {
16167 - if (PAGE_ALIGN(size) > MODULES_LEN)
16168 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
16169 return NULL;
16170 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
16171 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
16172 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
16173 -1, __builtin_return_address(0));
16174 }
16175
16176 +void *module_alloc(unsigned long size)
16177 +{
16178 +
16179 +#ifdef CONFIG_PAX_KERNEXEC
16180 + return __module_alloc(size, PAGE_KERNEL);
16181 +#else
16182 + return __module_alloc(size, PAGE_KERNEL_EXEC);
16183 +#endif
16184 +
16185 +}
16186 +
16187 +#ifdef CONFIG_PAX_KERNEXEC
16188 +#ifdef CONFIG_X86_32
16189 +void *module_alloc_exec(unsigned long size)
16190 +{
16191 + struct vm_struct *area;
16192 +
16193 + if (size == 0)
16194 + return NULL;
16195 +
16196 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
16197 + return area ? area->addr : NULL;
16198 +}
16199 +EXPORT_SYMBOL(module_alloc_exec);
16200 +
16201 +void module_free_exec(struct module *mod, void *module_region)
16202 +{
16203 + vunmap(module_region);
16204 +}
16205 +EXPORT_SYMBOL(module_free_exec);
16206 +#else
16207 +void module_free_exec(struct module *mod, void *module_region)
16208 +{
16209 + module_free(mod, module_region);
16210 +}
16211 +EXPORT_SYMBOL(module_free_exec);
16212 +
16213 +void *module_alloc_exec(unsigned long size)
16214 +{
16215 + return __module_alloc(size, PAGE_KERNEL_RX);
16216 +}
16217 +EXPORT_SYMBOL(module_alloc_exec);
16218 +#endif
16219 +#endif
16220 +
16221 #ifdef CONFIG_X86_32
16222 int apply_relocate(Elf32_Shdr *sechdrs,
16223 const char *strtab,
16224 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16225 unsigned int i;
16226 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
16227 Elf32_Sym *sym;
16228 - uint32_t *location;
16229 + uint32_t *plocation, location;
16230
16231 DEBUGP("Applying relocate section %u to %u\n", relsec,
16232 sechdrs[relsec].sh_info);
16233 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
16234 /* This is where to make the change */
16235 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
16236 - + rel[i].r_offset;
16237 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
16238 + location = (uint32_t)plocation;
16239 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
16240 + plocation = ktla_ktva((void *)plocation);
16241 /* This is the symbol it is referring to. Note that all
16242 undefined symbols have been resolved. */
16243 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16244 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16245 switch (ELF32_R_TYPE(rel[i].r_info)) {
16246 case R_386_32:
16247 /* We add the value into the location given */
16248 - *location += sym->st_value;
16249 + pax_open_kernel();
16250 + *plocation += sym->st_value;
16251 + pax_close_kernel();
16252 break;
16253 case R_386_PC32:
16254 /* Add the value, subtract its postition */
16255 - *location += sym->st_value - (uint32_t)location;
16256 + pax_open_kernel();
16257 + *plocation += sym->st_value - location;
16258 + pax_close_kernel();
16259 break;
16260 default:
16261 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16262 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16263 case R_X86_64_NONE:
16264 break;
16265 case R_X86_64_64:
16266 + pax_open_kernel();
16267 *(u64 *)loc = val;
16268 + pax_close_kernel();
16269 break;
16270 case R_X86_64_32:
16271 + pax_open_kernel();
16272 *(u32 *)loc = val;
16273 + pax_close_kernel();
16274 if (val != *(u32 *)loc)
16275 goto overflow;
16276 break;
16277 case R_X86_64_32S:
16278 + pax_open_kernel();
16279 *(s32 *)loc = val;
16280 + pax_close_kernel();
16281 if ((s64)val != *(s32 *)loc)
16282 goto overflow;
16283 break;
16284 case R_X86_64_PC32:
16285 val -= (u64)loc;
16286 + pax_open_kernel();
16287 *(u32 *)loc = val;
16288 + pax_close_kernel();
16289 +
16290 #if 0
16291 if ((s64)val != *(s32 *)loc)
16292 goto overflow;
16293 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16294 index e88f37b..1353db6 100644
16295 --- a/arch/x86/kernel/nmi.c
16296 +++ b/arch/x86/kernel/nmi.c
16297 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16298 dotraplinkage notrace __kprobes void
16299 do_nmi(struct pt_regs *regs, long error_code)
16300 {
16301 +
16302 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16303 + if (!user_mode(regs)) {
16304 + unsigned long cs = regs->cs & 0xFFFF;
16305 + unsigned long ip = ktva_ktla(regs->ip);
16306 +
16307 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16308 + regs->ip = ip;
16309 + }
16310 +#endif
16311 +
16312 nmi_enter();
16313
16314 inc_irq_stat(__nmi_count);
16315 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16316 index 676b8c7..870ba04 100644
16317 --- a/arch/x86/kernel/paravirt-spinlocks.c
16318 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16319 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16320 arch_spin_lock(lock);
16321 }
16322
16323 -struct pv_lock_ops pv_lock_ops = {
16324 +struct pv_lock_ops pv_lock_ops __read_only = {
16325 #ifdef CONFIG_SMP
16326 .spin_is_locked = __ticket_spin_is_locked,
16327 .spin_is_contended = __ticket_spin_is_contended,
16328 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16329 index d90272e..6bb013b 100644
16330 --- a/arch/x86/kernel/paravirt.c
16331 +++ b/arch/x86/kernel/paravirt.c
16332 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16333 {
16334 return x;
16335 }
16336 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16337 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16338 +#endif
16339
16340 void __init default_banner(void)
16341 {
16342 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16343 if (opfunc == NULL)
16344 /* If there's no function, patch it with a ud2a (BUG) */
16345 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16346 - else if (opfunc == _paravirt_nop)
16347 + else if (opfunc == (void *)_paravirt_nop)
16348 /* If the operation is a nop, then nop the callsite */
16349 ret = paravirt_patch_nop();
16350
16351 /* identity functions just return their single argument */
16352 - else if (opfunc == _paravirt_ident_32)
16353 + else if (opfunc == (void *)_paravirt_ident_32)
16354 ret = paravirt_patch_ident_32(insnbuf, len);
16355 - else if (opfunc == _paravirt_ident_64)
16356 + else if (opfunc == (void *)_paravirt_ident_64)
16357 ret = paravirt_patch_ident_64(insnbuf, len);
16358 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16359 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16360 + ret = paravirt_patch_ident_64(insnbuf, len);
16361 +#endif
16362
16363 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16364 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16365 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16366 if (insn_len > len || start == NULL)
16367 insn_len = len;
16368 else
16369 - memcpy(insnbuf, start, insn_len);
16370 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16371
16372 return insn_len;
16373 }
16374 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16375 preempt_enable();
16376 }
16377
16378 -struct pv_info pv_info = {
16379 +struct pv_info pv_info __read_only = {
16380 .name = "bare hardware",
16381 .paravirt_enabled = 0,
16382 .kernel_rpl = 0,
16383 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16384 #endif
16385 };
16386
16387 -struct pv_init_ops pv_init_ops = {
16388 +struct pv_init_ops pv_init_ops __read_only = {
16389 .patch = native_patch,
16390 };
16391
16392 -struct pv_time_ops pv_time_ops = {
16393 +struct pv_time_ops pv_time_ops __read_only = {
16394 .sched_clock = native_sched_clock,
16395 .steal_clock = native_steal_clock,
16396 };
16397
16398 -struct pv_irq_ops pv_irq_ops = {
16399 +struct pv_irq_ops pv_irq_ops __read_only = {
16400 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16401 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16402 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16403 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16404 #endif
16405 };
16406
16407 -struct pv_cpu_ops pv_cpu_ops = {
16408 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16409 .cpuid = native_cpuid,
16410 .get_debugreg = native_get_debugreg,
16411 .set_debugreg = native_set_debugreg,
16412 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16413 .end_context_switch = paravirt_nop,
16414 };
16415
16416 -struct pv_apic_ops pv_apic_ops = {
16417 +struct pv_apic_ops pv_apic_ops __read_only = {
16418 #ifdef CONFIG_X86_LOCAL_APIC
16419 .startup_ipi_hook = paravirt_nop,
16420 #endif
16421 };
16422
16423 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16424 +#ifdef CONFIG_X86_32
16425 +#ifdef CONFIG_X86_PAE
16426 +/* 64-bit pagetable entries */
16427 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16428 +#else
16429 /* 32-bit pagetable entries */
16430 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16431 +#endif
16432 #else
16433 /* 64-bit pagetable entries */
16434 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16435 #endif
16436
16437 -struct pv_mmu_ops pv_mmu_ops = {
16438 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16439
16440 .read_cr2 = native_read_cr2,
16441 .write_cr2 = native_write_cr2,
16442 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16443 .make_pud = PTE_IDENT,
16444
16445 .set_pgd = native_set_pgd,
16446 + .set_pgd_batched = native_set_pgd_batched,
16447 #endif
16448 #endif /* PAGETABLE_LEVELS >= 3 */
16449
16450 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16451 },
16452
16453 .set_fixmap = native_set_fixmap,
16454 +
16455 +#ifdef CONFIG_PAX_KERNEXEC
16456 + .pax_open_kernel = native_pax_open_kernel,
16457 + .pax_close_kernel = native_pax_close_kernel,
16458 +#endif
16459 +
16460 };
16461
16462 EXPORT_SYMBOL_GPL(pv_time_ops);
16463 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16464 index 35ccf75..7a15747 100644
16465 --- a/arch/x86/kernel/pci-iommu_table.c
16466 +++ b/arch/x86/kernel/pci-iommu_table.c
16467 @@ -2,7 +2,7 @@
16468 #include <asm/iommu_table.h>
16469 #include <linux/string.h>
16470 #include <linux/kallsyms.h>
16471 -
16472 +#include <linux/sched.h>
16473
16474 #define DEBUG 1
16475
16476 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16477 index ee5d4fb..426649b 100644
16478 --- a/arch/x86/kernel/process.c
16479 +++ b/arch/x86/kernel/process.c
16480 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16481
16482 void free_thread_info(struct thread_info *ti)
16483 {
16484 - free_thread_xstate(ti->task);
16485 free_pages((unsigned long)ti, THREAD_ORDER);
16486 }
16487
16488 +static struct kmem_cache *task_struct_cachep;
16489 +
16490 void arch_task_cache_init(void)
16491 {
16492 - task_xstate_cachep =
16493 - kmem_cache_create("task_xstate", xstate_size,
16494 + /* create a slab on which task_structs can be allocated */
16495 + task_struct_cachep =
16496 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16497 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16498 +
16499 + task_xstate_cachep =
16500 + kmem_cache_create("task_xstate", xstate_size,
16501 __alignof__(union thread_xstate),
16502 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16503 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16504 +}
16505 +
16506 +struct task_struct *alloc_task_struct_node(int node)
16507 +{
16508 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16509 +}
16510 +
16511 +void free_task_struct(struct task_struct *task)
16512 +{
16513 + free_thread_xstate(task);
16514 + kmem_cache_free(task_struct_cachep, task);
16515 }
16516
16517 /*
16518 @@ -70,7 +87,7 @@ void exit_thread(void)
16519 unsigned long *bp = t->io_bitmap_ptr;
16520
16521 if (bp) {
16522 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16523 + struct tss_struct *tss = init_tss + get_cpu();
16524
16525 t->io_bitmap_ptr = NULL;
16526 clear_thread_flag(TIF_IO_BITMAP);
16527 @@ -106,7 +123,7 @@ void show_regs_common(void)
16528
16529 printk(KERN_CONT "\n");
16530 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16531 - current->pid, current->comm, print_tainted(),
16532 + task_pid_nr(current), current->comm, print_tainted(),
16533 init_utsname()->release,
16534 (int)strcspn(init_utsname()->version, " "),
16535 init_utsname()->version);
16536 @@ -120,6 +137,9 @@ void flush_thread(void)
16537 {
16538 struct task_struct *tsk = current;
16539
16540 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16541 + loadsegment(gs, 0);
16542 +#endif
16543 flush_ptrace_hw_breakpoint(tsk);
16544 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16545 /*
16546 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16547 regs.di = (unsigned long) arg;
16548
16549 #ifdef CONFIG_X86_32
16550 - regs.ds = __USER_DS;
16551 - regs.es = __USER_DS;
16552 + regs.ds = __KERNEL_DS;
16553 + regs.es = __KERNEL_DS;
16554 regs.fs = __KERNEL_PERCPU;
16555 - regs.gs = __KERNEL_STACK_CANARY;
16556 + savesegment(gs, regs.gs);
16557 #else
16558 regs.ss = __KERNEL_DS;
16559 #endif
16560 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16561
16562 return ret;
16563 }
16564 -void stop_this_cpu(void *dummy)
16565 +__noreturn void stop_this_cpu(void *dummy)
16566 {
16567 local_irq_disable();
16568 /*
16569 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16570 }
16571 early_param("idle", idle_setup);
16572
16573 -unsigned long arch_align_stack(unsigned long sp)
16574 +#ifdef CONFIG_PAX_RANDKSTACK
16575 +void pax_randomize_kstack(struct pt_regs *regs)
16576 {
16577 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16578 - sp -= get_random_int() % 8192;
16579 - return sp & ~0xf;
16580 -}
16581 + struct thread_struct *thread = &current->thread;
16582 + unsigned long time;
16583
16584 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16585 -{
16586 - unsigned long range_end = mm->brk + 0x02000000;
16587 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16588 -}
16589 + if (!randomize_va_space)
16590 + return;
16591 +
16592 + if (v8086_mode(regs))
16593 + return;
16594
16595 + rdtscl(time);
16596 +
16597 + /* P4 seems to return a 0 LSB, ignore it */
16598 +#ifdef CONFIG_MPENTIUM4
16599 + time &= 0x3EUL;
16600 + time <<= 2;
16601 +#elif defined(CONFIG_X86_64)
16602 + time &= 0xFUL;
16603 + time <<= 4;
16604 +#else
16605 + time &= 0x1FUL;
16606 + time <<= 3;
16607 +#endif
16608 +
16609 + thread->sp0 ^= time;
16610 + load_sp0(init_tss + smp_processor_id(), thread);
16611 +
16612 +#ifdef CONFIG_X86_64
16613 + percpu_write(kernel_stack, thread->sp0);
16614 +#endif
16615 +}
16616 +#endif
16617 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16618 index 795b79f..063767a 100644
16619 --- a/arch/x86/kernel/process_32.c
16620 +++ b/arch/x86/kernel/process_32.c
16621 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16622 unsigned long thread_saved_pc(struct task_struct *tsk)
16623 {
16624 return ((unsigned long *)tsk->thread.sp)[3];
16625 +//XXX return tsk->thread.eip;
16626 }
16627
16628 #ifndef CONFIG_SMP
16629 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16630 unsigned long sp;
16631 unsigned short ss, gs;
16632
16633 - if (user_mode_vm(regs)) {
16634 + if (user_mode(regs)) {
16635 sp = regs->sp;
16636 ss = regs->ss & 0xffff;
16637 - gs = get_user_gs(regs);
16638 } else {
16639 sp = kernel_stack_pointer(regs);
16640 savesegment(ss, ss);
16641 - savesegment(gs, gs);
16642 }
16643 + gs = get_user_gs(regs);
16644
16645 show_regs_common();
16646
16647 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16648 struct task_struct *tsk;
16649 int err;
16650
16651 - childregs = task_pt_regs(p);
16652 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16653 *childregs = *regs;
16654 childregs->ax = 0;
16655 childregs->sp = sp;
16656
16657 p->thread.sp = (unsigned long) childregs;
16658 p->thread.sp0 = (unsigned long) (childregs+1);
16659 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16660
16661 p->thread.ip = (unsigned long) ret_from_fork;
16662
16663 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16664 struct thread_struct *prev = &prev_p->thread,
16665 *next = &next_p->thread;
16666 int cpu = smp_processor_id();
16667 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16668 + struct tss_struct *tss = init_tss + cpu;
16669 bool preload_fpu;
16670
16671 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16672 @@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16673 */
16674 lazy_save_gs(prev->gs);
16675
16676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16677 + __set_fs(task_thread_info(next_p)->addr_limit);
16678 +#endif
16679 +
16680 /*
16681 * Load the per-thread Thread-Local Storage descriptor.
16682 */
16683 @@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16684 */
16685 arch_end_context_switch(next_p);
16686
16687 + percpu_write(current_task, next_p);
16688 + percpu_write(current_tinfo, &next_p->tinfo);
16689 +
16690 if (preload_fpu)
16691 __math_state_restore();
16692
16693 @@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16694 if (prev->gs | next->gs)
16695 lazy_load_gs(next->gs);
16696
16697 - percpu_write(current_task, next_p);
16698 -
16699 return prev_p;
16700 }
16701
16702 @@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16703 } while (count++ < 16);
16704 return 0;
16705 }
16706 -
16707 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16708 index 3bd7e6e..90b2bcf 100644
16709 --- a/arch/x86/kernel/process_64.c
16710 +++ b/arch/x86/kernel/process_64.c
16711 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16712 void exit_idle(void)
16713 {
16714 /* idle loop has pid 0 */
16715 - if (current->pid)
16716 + if (task_pid_nr(current))
16717 return;
16718 __exit_idle();
16719 }
16720 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16721 struct pt_regs *childregs;
16722 struct task_struct *me = current;
16723
16724 - childregs = ((struct pt_regs *)
16725 - (THREAD_SIZE + task_stack_page(p))) - 1;
16726 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16727 *childregs = *regs;
16728
16729 childregs->ax = 0;
16730 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16731 p->thread.sp = (unsigned long) childregs;
16732 p->thread.sp0 = (unsigned long) (childregs+1);
16733 p->thread.usersp = me->thread.usersp;
16734 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16735
16736 set_tsk_thread_flag(p, TIF_FORK);
16737
16738 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16739 struct thread_struct *prev = &prev_p->thread;
16740 struct thread_struct *next = &next_p->thread;
16741 int cpu = smp_processor_id();
16742 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16743 + struct tss_struct *tss = init_tss + cpu;
16744 unsigned fsindex, gsindex;
16745 bool preload_fpu;
16746
16747 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16748 prev->usersp = percpu_read(old_rsp);
16749 percpu_write(old_rsp, next->usersp);
16750 percpu_write(current_task, next_p);
16751 + percpu_write(current_tinfo, &next_p->tinfo);
16752
16753 - percpu_write(kernel_stack,
16754 - (unsigned long)task_stack_page(next_p) +
16755 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16756 + percpu_write(kernel_stack, next->sp0);
16757
16758 /*
16759 * Now maybe reload the debug registers and handle I/O bitmaps
16760 @@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16761 if (!p || p == current || p->state == TASK_RUNNING)
16762 return 0;
16763 stack = (unsigned long)task_stack_page(p);
16764 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16765 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16766 return 0;
16767 fp = *(u64 *)(p->thread.sp);
16768 do {
16769 - if (fp < (unsigned long)stack ||
16770 - fp >= (unsigned long)stack+THREAD_SIZE)
16771 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16772 return 0;
16773 ip = *(u64 *)(fp+8);
16774 if (!in_sched_functions(ip))
16775 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16776 index 8252879..d3219e0 100644
16777 --- a/arch/x86/kernel/ptrace.c
16778 +++ b/arch/x86/kernel/ptrace.c
16779 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16780 unsigned long addr, unsigned long data)
16781 {
16782 int ret;
16783 - unsigned long __user *datap = (unsigned long __user *)data;
16784 + unsigned long __user *datap = (__force unsigned long __user *)data;
16785
16786 switch (request) {
16787 /* read the word at location addr in the USER area. */
16788 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16789 if ((int) addr < 0)
16790 return -EIO;
16791 ret = do_get_thread_area(child, addr,
16792 - (struct user_desc __user *)data);
16793 + (__force struct user_desc __user *) data);
16794 break;
16795
16796 case PTRACE_SET_THREAD_AREA:
16797 if ((int) addr < 0)
16798 return -EIO;
16799 ret = do_set_thread_area(child, addr,
16800 - (struct user_desc __user *)data, 0);
16801 + (__force struct user_desc __user *) data, 0);
16802 break;
16803 #endif
16804
16805 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16806 memset(info, 0, sizeof(*info));
16807 info->si_signo = SIGTRAP;
16808 info->si_code = si_code;
16809 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16810 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16811 }
16812
16813 void user_single_step_siginfo(struct task_struct *tsk,
16814 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16815 index 42eb330..139955c 100644
16816 --- a/arch/x86/kernel/pvclock.c
16817 +++ b/arch/x86/kernel/pvclock.c
16818 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16819 return pv_tsc_khz;
16820 }
16821
16822 -static atomic64_t last_value = ATOMIC64_INIT(0);
16823 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16824
16825 void pvclock_resume(void)
16826 {
16827 - atomic64_set(&last_value, 0);
16828 + atomic64_set_unchecked(&last_value, 0);
16829 }
16830
16831 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16832 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16833 * updating at the same time, and one of them could be slightly behind,
16834 * making the assumption that last_value always go forward fail to hold.
16835 */
16836 - last = atomic64_read(&last_value);
16837 + last = atomic64_read_unchecked(&last_value);
16838 do {
16839 if (ret < last)
16840 return last;
16841 - last = atomic64_cmpxchg(&last_value, last, ret);
16842 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16843 } while (unlikely(last != ret));
16844
16845 return ret;
16846 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16847 index 37a458b..e63d183 100644
16848 --- a/arch/x86/kernel/reboot.c
16849 +++ b/arch/x86/kernel/reboot.c
16850 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16851 EXPORT_SYMBOL(pm_power_off);
16852
16853 static const struct desc_ptr no_idt = {};
16854 -static int reboot_mode;
16855 +static unsigned short reboot_mode;
16856 enum reboot_type reboot_type = BOOT_ACPI;
16857 int reboot_force;
16858
16859 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16860 extern const unsigned char machine_real_restart_asm[];
16861 extern const u64 machine_real_restart_gdt[3];
16862
16863 -void machine_real_restart(unsigned int type)
16864 +__noreturn void machine_real_restart(unsigned int type)
16865 {
16866 void *restart_va;
16867 unsigned long restart_pa;
16868 - void (*restart_lowmem)(unsigned int);
16869 + void (* __noreturn restart_lowmem)(unsigned int);
16870 u64 *lowmem_gdt;
16871
16872 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16873 + struct desc_struct *gdt;
16874 +#endif
16875 +
16876 local_irq_disable();
16877
16878 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16879 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16880 boot)". This seems like a fairly standard thing that gets set by
16881 REBOOT.COM programs, and the previous reset routine did this
16882 too. */
16883 - *((unsigned short *)0x472) = reboot_mode;
16884 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16885
16886 /* Patch the GDT in the low memory trampoline */
16887 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16888
16889 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16890 restart_pa = virt_to_phys(restart_va);
16891 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16892 + restart_lowmem = (void *)restart_pa;
16893
16894 /* GDT[0]: GDT self-pointer */
16895 lowmem_gdt[0] =
16896 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16897 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16898
16899 /* Jump to the identity-mapped low memory code */
16900 +
16901 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16902 + gdt = get_cpu_gdt_table(smp_processor_id());
16903 + pax_open_kernel();
16904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16905 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16906 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16907 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16908 +#endif
16909 +#ifdef CONFIG_PAX_KERNEXEC
16910 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16911 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16912 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16913 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16914 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16915 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16916 +#endif
16917 + pax_close_kernel();
16918 +#endif
16919 +
16920 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16921 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16922 + unreachable();
16923 +#else
16924 restart_lowmem(type);
16925 +#endif
16926 +
16927 }
16928 #ifdef CONFIG_APM_MODULE
16929 EXPORT_SYMBOL(machine_real_restart);
16930 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16931 * try to force a triple fault and then cycle between hitting the keyboard
16932 * controller and doing that
16933 */
16934 -static void native_machine_emergency_restart(void)
16935 +__noreturn static void native_machine_emergency_restart(void)
16936 {
16937 int i;
16938 int attempt = 0;
16939 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16940 #endif
16941 }
16942
16943 -static void __machine_emergency_restart(int emergency)
16944 +static __noreturn void __machine_emergency_restart(int emergency)
16945 {
16946 reboot_emergency = emergency;
16947 machine_ops.emergency_restart();
16948 }
16949
16950 -static void native_machine_restart(char *__unused)
16951 +static __noreturn void native_machine_restart(char *__unused)
16952 {
16953 printk("machine restart\n");
16954
16955 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16956 __machine_emergency_restart(0);
16957 }
16958
16959 -static void native_machine_halt(void)
16960 +static __noreturn void native_machine_halt(void)
16961 {
16962 /* stop other cpus and apics */
16963 machine_shutdown();
16964 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16965 stop_this_cpu(NULL);
16966 }
16967
16968 -static void native_machine_power_off(void)
16969 +__noreturn static void native_machine_power_off(void)
16970 {
16971 if (pm_power_off) {
16972 if (!reboot_force)
16973 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16974 }
16975 /* a fallback in case there is no PM info available */
16976 tboot_shutdown(TB_SHUTDOWN_HALT);
16977 + unreachable();
16978 }
16979
16980 struct machine_ops machine_ops = {
16981 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16982 index 7a6f3b3..bed145d7 100644
16983 --- a/arch/x86/kernel/relocate_kernel_64.S
16984 +++ b/arch/x86/kernel/relocate_kernel_64.S
16985 @@ -11,6 +11,7 @@
16986 #include <asm/kexec.h>
16987 #include <asm/processor-flags.h>
16988 #include <asm/pgtable_types.h>
16989 +#include <asm/alternative-asm.h>
16990
16991 /*
16992 * Must be relocatable PIC code callable as a C function
16993 @@ -160,13 +161,14 @@ identity_mapped:
16994 xorq %rbp, %rbp
16995 xorq %r8, %r8
16996 xorq %r9, %r9
16997 - xorq %r10, %r9
16998 + xorq %r10, %r10
16999 xorq %r11, %r11
17000 xorq %r12, %r12
17001 xorq %r13, %r13
17002 xorq %r14, %r14
17003 xorq %r15, %r15
17004
17005 + pax_force_retaddr 0, 1
17006 ret
17007
17008 1:
17009 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
17010 index cf0ef98..e3f780b 100644
17011 --- a/arch/x86/kernel/setup.c
17012 +++ b/arch/x86/kernel/setup.c
17013 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
17014
17015 switch (data->type) {
17016 case SETUP_E820_EXT:
17017 - parse_e820_ext(data);
17018 + parse_e820_ext((struct setup_data __force_kernel *)data);
17019 break;
17020 case SETUP_DTB:
17021 add_dtb(pa_data);
17022 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
17023 * area (640->1Mb) as ram even though it is not.
17024 * take them out.
17025 */
17026 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
17027 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
17028 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
17029 }
17030
17031 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
17032
17033 if (!boot_params.hdr.root_flags)
17034 root_mountflags &= ~MS_RDONLY;
17035 - init_mm.start_code = (unsigned long) _text;
17036 - init_mm.end_code = (unsigned long) _etext;
17037 + init_mm.start_code = ktla_ktva((unsigned long) _text);
17038 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
17039 init_mm.end_data = (unsigned long) _edata;
17040 init_mm.brk = _brk_end;
17041
17042 - code_resource.start = virt_to_phys(_text);
17043 - code_resource.end = virt_to_phys(_etext)-1;
17044 - data_resource.start = virt_to_phys(_etext);
17045 + code_resource.start = virt_to_phys(ktla_ktva(_text));
17046 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
17047 + data_resource.start = virt_to_phys(_sdata);
17048 data_resource.end = virt_to_phys(_edata)-1;
17049 bss_resource.start = virt_to_phys(&__bss_start);
17050 bss_resource.end = virt_to_phys(&__bss_stop)-1;
17051 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
17052 index 71f4727..16dc9f7 100644
17053 --- a/arch/x86/kernel/setup_percpu.c
17054 +++ b/arch/x86/kernel/setup_percpu.c
17055 @@ -21,19 +21,17 @@
17056 #include <asm/cpu.h>
17057 #include <asm/stackprotector.h>
17058
17059 -DEFINE_PER_CPU(int, cpu_number);
17060 +#ifdef CONFIG_SMP
17061 +DEFINE_PER_CPU(unsigned int, cpu_number);
17062 EXPORT_PER_CPU_SYMBOL(cpu_number);
17063 +#endif
17064
17065 -#ifdef CONFIG_X86_64
17066 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
17067 -#else
17068 -#define BOOT_PERCPU_OFFSET 0
17069 -#endif
17070
17071 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
17072 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
17073
17074 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
17075 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
17076 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
17077 };
17078 EXPORT_SYMBOL(__per_cpu_offset);
17079 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
17080 {
17081 #ifdef CONFIG_X86_32
17082 struct desc_struct gdt;
17083 + unsigned long base = per_cpu_offset(cpu);
17084
17085 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
17086 - 0x2 | DESCTYPE_S, 0x8);
17087 - gdt.s = 1;
17088 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
17089 + 0x83 | DESCTYPE_S, 0xC);
17090 write_gdt_entry(get_cpu_gdt_table(cpu),
17091 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
17092 #endif
17093 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
17094 /* alrighty, percpu areas up and running */
17095 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
17096 for_each_possible_cpu(cpu) {
17097 +#ifdef CONFIG_CC_STACKPROTECTOR
17098 +#ifdef CONFIG_X86_32
17099 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
17100 +#endif
17101 +#endif
17102 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
17103 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
17104 per_cpu(cpu_number, cpu) = cpu;
17105 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
17106 */
17107 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
17108 #endif
17109 +#ifdef CONFIG_CC_STACKPROTECTOR
17110 +#ifdef CONFIG_X86_32
17111 + if (!cpu)
17112 + per_cpu(stack_canary.canary, cpu) = canary;
17113 +#endif
17114 +#endif
17115 /*
17116 * Up to this point, the boot CPU has been using .init.data
17117 * area. Reload any changed state for the boot CPU.
17118 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
17119 index 54ddaeb2..22c3bdc 100644
17120 --- a/arch/x86/kernel/signal.c
17121 +++ b/arch/x86/kernel/signal.c
17122 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
17123 * Align the stack pointer according to the i386 ABI,
17124 * i.e. so that on function entry ((sp + 4) & 15) == 0.
17125 */
17126 - sp = ((sp + 4) & -16ul) - 4;
17127 + sp = ((sp - 12) & -16ul) - 4;
17128 #else /* !CONFIG_X86_32 */
17129 sp = round_down(sp, 16) - 8;
17130 #endif
17131 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
17132 * Return an always-bogus address instead so we will die with SIGSEGV.
17133 */
17134 if (onsigstack && !likely(on_sig_stack(sp)))
17135 - return (void __user *)-1L;
17136 + return (__force void __user *)-1L;
17137
17138 /* save i387 state */
17139 if (used_math() && save_i387_xstate(*fpstate) < 0)
17140 - return (void __user *)-1L;
17141 + return (__force void __user *)-1L;
17142
17143 return (void __user *)sp;
17144 }
17145 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17146 }
17147
17148 if (current->mm->context.vdso)
17149 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17150 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17151 else
17152 - restorer = &frame->retcode;
17153 + restorer = (void __user *)&frame->retcode;
17154 if (ka->sa.sa_flags & SA_RESTORER)
17155 restorer = ka->sa.sa_restorer;
17156
17157 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17158 * reasons and because gdb uses it as a signature to notice
17159 * signal handler stack frames.
17160 */
17161 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
17162 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
17163
17164 if (err)
17165 return -EFAULT;
17166 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17167 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
17168
17169 /* Set up to return from userspace. */
17170 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17171 + if (current->mm->context.vdso)
17172 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17173 + else
17174 + restorer = (void __user *)&frame->retcode;
17175 if (ka->sa.sa_flags & SA_RESTORER)
17176 restorer = ka->sa.sa_restorer;
17177 put_user_ex(restorer, &frame->pretcode);
17178 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17179 * reasons and because gdb uses it as a signature to notice
17180 * signal handler stack frames.
17181 */
17182 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
17183 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
17184 } put_user_catch(err);
17185
17186 if (err)
17187 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
17188 * X86_32: vm86 regs switched out by assembly code before reaching
17189 * here, so testing against kernel CS suffices.
17190 */
17191 - if (!user_mode(regs))
17192 + if (!user_mode_novm(regs))
17193 return;
17194
17195 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
17196 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
17197 index 9f548cb..caf76f7 100644
17198 --- a/arch/x86/kernel/smpboot.c
17199 +++ b/arch/x86/kernel/smpboot.c
17200 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
17201 set_idle_for_cpu(cpu, c_idle.idle);
17202 do_rest:
17203 per_cpu(current_task, cpu) = c_idle.idle;
17204 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
17205 #ifdef CONFIG_X86_32
17206 /* Stack for startup_32 can be just as for start_secondary onwards */
17207 irq_ctx_init(cpu);
17208 #else
17209 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
17210 initial_gs = per_cpu_offset(cpu);
17211 - per_cpu(kernel_stack, cpu) =
17212 - (unsigned long)task_stack_page(c_idle.idle) -
17213 - KERNEL_STACK_OFFSET + THREAD_SIZE;
17214 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
17215 #endif
17216 +
17217 + pax_open_kernel();
17218 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17219 + pax_close_kernel();
17220 +
17221 initial_code = (unsigned long)start_secondary;
17222 stack_start = c_idle.idle->thread.sp;
17223
17224 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
17225
17226 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
17227
17228 +#ifdef CONFIG_PAX_PER_CPU_PGD
17229 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
17230 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17231 + KERNEL_PGD_PTRS);
17232 +#endif
17233 +
17234 err = do_boot_cpu(apicid, cpu);
17235 if (err) {
17236 pr_debug("do_boot_cpu failed %d\n", err);
17237 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
17238 index c346d11..d43b163 100644
17239 --- a/arch/x86/kernel/step.c
17240 +++ b/arch/x86/kernel/step.c
17241 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17242 struct desc_struct *desc;
17243 unsigned long base;
17244
17245 - seg &= ~7UL;
17246 + seg >>= 3;
17247
17248 mutex_lock(&child->mm->context.lock);
17249 - if (unlikely((seg >> 3) >= child->mm->context.size))
17250 + if (unlikely(seg >= child->mm->context.size))
17251 addr = -1L; /* bogus selector, access would fault */
17252 else {
17253 desc = child->mm->context.ldt + seg;
17254 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17255 addr += base;
17256 }
17257 mutex_unlock(&child->mm->context.lock);
17258 - }
17259 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17260 + addr = ktla_ktva(addr);
17261
17262 return addr;
17263 }
17264 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17265 unsigned char opcode[15];
17266 unsigned long addr = convert_ip_to_linear(child, regs);
17267
17268 + if (addr == -EINVAL)
17269 + return 0;
17270 +
17271 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17272 for (i = 0; i < copied; i++) {
17273 switch (opcode[i]) {
17274 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17275 index 0b0cb5f..db6b9ed 100644
17276 --- a/arch/x86/kernel/sys_i386_32.c
17277 +++ b/arch/x86/kernel/sys_i386_32.c
17278 @@ -24,17 +24,224 @@
17279
17280 #include <asm/syscalls.h>
17281
17282 -/*
17283 - * Do a system call from kernel instead of calling sys_execve so we
17284 - * end up with proper pt_regs.
17285 - */
17286 -int kernel_execve(const char *filename,
17287 - const char *const argv[],
17288 - const char *const envp[])
17289 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17290 {
17291 - long __res;
17292 - asm volatile ("int $0x80"
17293 - : "=a" (__res)
17294 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17295 - return __res;
17296 + unsigned long pax_task_size = TASK_SIZE;
17297 +
17298 +#ifdef CONFIG_PAX_SEGMEXEC
17299 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17300 + pax_task_size = SEGMEXEC_TASK_SIZE;
17301 +#endif
17302 +
17303 + if (len > pax_task_size || addr > pax_task_size - len)
17304 + return -EINVAL;
17305 +
17306 + return 0;
17307 +}
17308 +
17309 +unsigned long
17310 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17311 + unsigned long len, unsigned long pgoff, unsigned long flags)
17312 +{
17313 + struct mm_struct *mm = current->mm;
17314 + struct vm_area_struct *vma;
17315 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17316 +
17317 +#ifdef CONFIG_PAX_SEGMEXEC
17318 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17319 + pax_task_size = SEGMEXEC_TASK_SIZE;
17320 +#endif
17321 +
17322 + pax_task_size -= PAGE_SIZE;
17323 +
17324 + if (len > pax_task_size)
17325 + return -ENOMEM;
17326 +
17327 + if (flags & MAP_FIXED)
17328 + return addr;
17329 +
17330 +#ifdef CONFIG_PAX_RANDMMAP
17331 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17332 +#endif
17333 +
17334 + if (addr) {
17335 + addr = PAGE_ALIGN(addr);
17336 + if (pax_task_size - len >= addr) {
17337 + vma = find_vma(mm, addr);
17338 + if (check_heap_stack_gap(vma, addr, len))
17339 + return addr;
17340 + }
17341 + }
17342 + if (len > mm->cached_hole_size) {
17343 + start_addr = addr = mm->free_area_cache;
17344 + } else {
17345 + start_addr = addr = mm->mmap_base;
17346 + mm->cached_hole_size = 0;
17347 + }
17348 +
17349 +#ifdef CONFIG_PAX_PAGEEXEC
17350 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17351 + start_addr = 0x00110000UL;
17352 +
17353 +#ifdef CONFIG_PAX_RANDMMAP
17354 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17355 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17356 +#endif
17357 +
17358 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17359 + start_addr = addr = mm->mmap_base;
17360 + else
17361 + addr = start_addr;
17362 + }
17363 +#endif
17364 +
17365 +full_search:
17366 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17367 + /* At this point: (!vma || addr < vma->vm_end). */
17368 + if (pax_task_size - len < addr) {
17369 + /*
17370 + * Start a new search - just in case we missed
17371 + * some holes.
17372 + */
17373 + if (start_addr != mm->mmap_base) {
17374 + start_addr = addr = mm->mmap_base;
17375 + mm->cached_hole_size = 0;
17376 + goto full_search;
17377 + }
17378 + return -ENOMEM;
17379 + }
17380 + if (check_heap_stack_gap(vma, addr, len))
17381 + break;
17382 + if (addr + mm->cached_hole_size < vma->vm_start)
17383 + mm->cached_hole_size = vma->vm_start - addr;
17384 + addr = vma->vm_end;
17385 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17386 + start_addr = addr = mm->mmap_base;
17387 + mm->cached_hole_size = 0;
17388 + goto full_search;
17389 + }
17390 + }
17391 +
17392 + /*
17393 + * Remember the place where we stopped the search:
17394 + */
17395 + mm->free_area_cache = addr + len;
17396 + return addr;
17397 +}
17398 +
17399 +unsigned long
17400 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17401 + const unsigned long len, const unsigned long pgoff,
17402 + const unsigned long flags)
17403 +{
17404 + struct vm_area_struct *vma;
17405 + struct mm_struct *mm = current->mm;
17406 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17407 +
17408 +#ifdef CONFIG_PAX_SEGMEXEC
17409 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17410 + pax_task_size = SEGMEXEC_TASK_SIZE;
17411 +#endif
17412 +
17413 + pax_task_size -= PAGE_SIZE;
17414 +
17415 + /* requested length too big for entire address space */
17416 + if (len > pax_task_size)
17417 + return -ENOMEM;
17418 +
17419 + if (flags & MAP_FIXED)
17420 + return addr;
17421 +
17422 +#ifdef CONFIG_PAX_PAGEEXEC
17423 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17424 + goto bottomup;
17425 +#endif
17426 +
17427 +#ifdef CONFIG_PAX_RANDMMAP
17428 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17429 +#endif
17430 +
17431 + /* requesting a specific address */
17432 + if (addr) {
17433 + addr = PAGE_ALIGN(addr);
17434 + if (pax_task_size - len >= addr) {
17435 + vma = find_vma(mm, addr);
17436 + if (check_heap_stack_gap(vma, addr, len))
17437 + return addr;
17438 + }
17439 + }
17440 +
17441 + /* check if free_area_cache is useful for us */
17442 + if (len <= mm->cached_hole_size) {
17443 + mm->cached_hole_size = 0;
17444 + mm->free_area_cache = mm->mmap_base;
17445 + }
17446 +
17447 + /* either no address requested or can't fit in requested address hole */
17448 + addr = mm->free_area_cache;
17449 +
17450 + /* make sure it can fit in the remaining address space */
17451 + if (addr > len) {
17452 + vma = find_vma(mm, addr-len);
17453 + if (check_heap_stack_gap(vma, addr - len, len))
17454 + /* remember the address as a hint for next time */
17455 + return (mm->free_area_cache = addr-len);
17456 + }
17457 +
17458 + if (mm->mmap_base < len)
17459 + goto bottomup;
17460 +
17461 + addr = mm->mmap_base-len;
17462 +
17463 + do {
17464 + /*
17465 + * Lookup failure means no vma is above this address,
17466 + * else if new region fits below vma->vm_start,
17467 + * return with success:
17468 + */
17469 + vma = find_vma(mm, addr);
17470 + if (check_heap_stack_gap(vma, addr, len))
17471 + /* remember the address as a hint for next time */
17472 + return (mm->free_area_cache = addr);
17473 +
17474 + /* remember the largest hole we saw so far */
17475 + if (addr + mm->cached_hole_size < vma->vm_start)
17476 + mm->cached_hole_size = vma->vm_start - addr;
17477 +
17478 + /* try just below the current vma->vm_start */
17479 + addr = skip_heap_stack_gap(vma, len);
17480 + } while (!IS_ERR_VALUE(addr));
17481 +
17482 +bottomup:
17483 + /*
17484 + * A failed mmap() very likely causes application failure,
17485 + * so fall back to the bottom-up function here. This scenario
17486 + * can happen with large stack limits and large mmap()
17487 + * allocations.
17488 + */
17489 +
17490 +#ifdef CONFIG_PAX_SEGMEXEC
17491 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17492 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17493 + else
17494 +#endif
17495 +
17496 + mm->mmap_base = TASK_UNMAPPED_BASE;
17497 +
17498 +#ifdef CONFIG_PAX_RANDMMAP
17499 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17500 + mm->mmap_base += mm->delta_mmap;
17501 +#endif
17502 +
17503 + mm->free_area_cache = mm->mmap_base;
17504 + mm->cached_hole_size = ~0UL;
17505 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17506 + /*
17507 + * Restore the topdown base:
17508 + */
17509 + mm->mmap_base = base;
17510 + mm->free_area_cache = base;
17511 + mm->cached_hole_size = ~0UL;
17512 +
17513 + return addr;
17514 }
17515 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17516 index 0514890..3dbebce 100644
17517 --- a/arch/x86/kernel/sys_x86_64.c
17518 +++ b/arch/x86/kernel/sys_x86_64.c
17519 @@ -95,8 +95,8 @@ out:
17520 return error;
17521 }
17522
17523 -static void find_start_end(unsigned long flags, unsigned long *begin,
17524 - unsigned long *end)
17525 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17526 + unsigned long *begin, unsigned long *end)
17527 {
17528 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17529 unsigned long new_begin;
17530 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17531 *begin = new_begin;
17532 }
17533 } else {
17534 - *begin = TASK_UNMAPPED_BASE;
17535 + *begin = mm->mmap_base;
17536 *end = TASK_SIZE;
17537 }
17538 }
17539 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17540 if (flags & MAP_FIXED)
17541 return addr;
17542
17543 - find_start_end(flags, &begin, &end);
17544 + find_start_end(mm, flags, &begin, &end);
17545
17546 if (len > end)
17547 return -ENOMEM;
17548
17549 +#ifdef CONFIG_PAX_RANDMMAP
17550 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17551 +#endif
17552 +
17553 if (addr) {
17554 addr = PAGE_ALIGN(addr);
17555 vma = find_vma(mm, addr);
17556 - if (end - len >= addr &&
17557 - (!vma || addr + len <= vma->vm_start))
17558 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17559 return addr;
17560 }
17561 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17562 @@ -172,7 +175,7 @@ full_search:
17563 }
17564 return -ENOMEM;
17565 }
17566 - if (!vma || addr + len <= vma->vm_start) {
17567 + if (check_heap_stack_gap(vma, addr, len)) {
17568 /*
17569 * Remember the place where we stopped the search:
17570 */
17571 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17572 {
17573 struct vm_area_struct *vma;
17574 struct mm_struct *mm = current->mm;
17575 - unsigned long addr = addr0;
17576 + unsigned long base = mm->mmap_base, addr = addr0;
17577
17578 /* requested length too big for entire address space */
17579 if (len > TASK_SIZE)
17580 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17581 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17582 goto bottomup;
17583
17584 +#ifdef CONFIG_PAX_RANDMMAP
17585 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17586 +#endif
17587 +
17588 /* requesting a specific address */
17589 if (addr) {
17590 addr = PAGE_ALIGN(addr);
17591 - vma = find_vma(mm, addr);
17592 - if (TASK_SIZE - len >= addr &&
17593 - (!vma || addr + len <= vma->vm_start))
17594 - return addr;
17595 + if (TASK_SIZE - len >= addr) {
17596 + vma = find_vma(mm, addr);
17597 + if (check_heap_stack_gap(vma, addr, len))
17598 + return addr;
17599 + }
17600 }
17601
17602 /* check if free_area_cache is useful for us */
17603 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17604 ALIGN_TOPDOWN);
17605
17606 vma = find_vma(mm, tmp_addr);
17607 - if (!vma || tmp_addr + len <= vma->vm_start)
17608 + if (check_heap_stack_gap(vma, tmp_addr, len))
17609 /* remember the address as a hint for next time */
17610 return mm->free_area_cache = tmp_addr;
17611 }
17612 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17613 * return with success:
17614 */
17615 vma = find_vma(mm, addr);
17616 - if (!vma || addr+len <= vma->vm_start)
17617 + if (check_heap_stack_gap(vma, addr, len))
17618 /* remember the address as a hint for next time */
17619 return mm->free_area_cache = addr;
17620
17621 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17622 mm->cached_hole_size = vma->vm_start - addr;
17623
17624 /* try just below the current vma->vm_start */
17625 - addr = vma->vm_start-len;
17626 - } while (len < vma->vm_start);
17627 + addr = skip_heap_stack_gap(vma, len);
17628 + } while (!IS_ERR_VALUE(addr));
17629
17630 bottomup:
17631 /*
17632 @@ -270,13 +278,21 @@ bottomup:
17633 * can happen with large stack limits and large mmap()
17634 * allocations.
17635 */
17636 + mm->mmap_base = TASK_UNMAPPED_BASE;
17637 +
17638 +#ifdef CONFIG_PAX_RANDMMAP
17639 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17640 + mm->mmap_base += mm->delta_mmap;
17641 +#endif
17642 +
17643 + mm->free_area_cache = mm->mmap_base;
17644 mm->cached_hole_size = ~0UL;
17645 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17646 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17647 /*
17648 * Restore the topdown base:
17649 */
17650 - mm->free_area_cache = mm->mmap_base;
17651 + mm->mmap_base = base;
17652 + mm->free_area_cache = base;
17653 mm->cached_hole_size = ~0UL;
17654
17655 return addr;
17656 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17657 index 9a0e312..e6f66f2 100644
17658 --- a/arch/x86/kernel/syscall_table_32.S
17659 +++ b/arch/x86/kernel/syscall_table_32.S
17660 @@ -1,3 +1,4 @@
17661 +.section .rodata,"a",@progbits
17662 ENTRY(sys_call_table)
17663 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17664 .long sys_exit
17665 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17666 index e2410e2..4fe3fbc 100644
17667 --- a/arch/x86/kernel/tboot.c
17668 +++ b/arch/x86/kernel/tboot.c
17669 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17670
17671 void tboot_shutdown(u32 shutdown_type)
17672 {
17673 - void (*shutdown)(void);
17674 + void (* __noreturn shutdown)(void);
17675
17676 if (!tboot_enabled())
17677 return;
17678 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17679
17680 switch_to_tboot_pt();
17681
17682 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17683 + shutdown = (void *)tboot->shutdown_entry;
17684 shutdown();
17685
17686 /* should not reach here */
17687 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17688 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17689 }
17690
17691 -static atomic_t ap_wfs_count;
17692 +static atomic_unchecked_t ap_wfs_count;
17693
17694 static int tboot_wait_for_aps(int num_aps)
17695 {
17696 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17697 {
17698 switch (action) {
17699 case CPU_DYING:
17700 - atomic_inc(&ap_wfs_count);
17701 + atomic_inc_unchecked(&ap_wfs_count);
17702 if (num_online_cpus() == 1)
17703 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17704 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17705 return NOTIFY_BAD;
17706 break;
17707 }
17708 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17709
17710 tboot_create_trampoline();
17711
17712 - atomic_set(&ap_wfs_count, 0);
17713 + atomic_set_unchecked(&ap_wfs_count, 0);
17714 register_hotcpu_notifier(&tboot_cpu_notifier);
17715 return 0;
17716 }
17717 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17718 index dd5fbf4..b7f2232 100644
17719 --- a/arch/x86/kernel/time.c
17720 +++ b/arch/x86/kernel/time.c
17721 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17722 {
17723 unsigned long pc = instruction_pointer(regs);
17724
17725 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17726 + if (!user_mode(regs) && in_lock_functions(pc)) {
17727 #ifdef CONFIG_FRAME_POINTER
17728 - return *(unsigned long *)(regs->bp + sizeof(long));
17729 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17730 #else
17731 unsigned long *sp =
17732 (unsigned long *)kernel_stack_pointer(regs);
17733 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17734 * or above a saved flags. Eflags has bits 22-31 zero,
17735 * kernel addresses don't.
17736 */
17737 +
17738 +#ifdef CONFIG_PAX_KERNEXEC
17739 + return ktla_ktva(sp[0]);
17740 +#else
17741 if (sp[0] >> 22)
17742 return sp[0];
17743 if (sp[1] >> 22)
17744 return sp[1];
17745 #endif
17746 +
17747 +#endif
17748 }
17749 return pc;
17750 }
17751 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17752 index 6bb7b85..dd853e1 100644
17753 --- a/arch/x86/kernel/tls.c
17754 +++ b/arch/x86/kernel/tls.c
17755 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17756 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17757 return -EINVAL;
17758
17759 +#ifdef CONFIG_PAX_SEGMEXEC
17760 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17761 + return -EINVAL;
17762 +#endif
17763 +
17764 set_tls_desc(p, idx, &info, 1);
17765
17766 return 0;
17767 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17768 index 451c0a7..e57f551 100644
17769 --- a/arch/x86/kernel/trampoline_32.S
17770 +++ b/arch/x86/kernel/trampoline_32.S
17771 @@ -32,6 +32,12 @@
17772 #include <asm/segment.h>
17773 #include <asm/page_types.h>
17774
17775 +#ifdef CONFIG_PAX_KERNEXEC
17776 +#define ta(X) (X)
17777 +#else
17778 +#define ta(X) ((X) - __PAGE_OFFSET)
17779 +#endif
17780 +
17781 #ifdef CONFIG_SMP
17782
17783 .section ".x86_trampoline","a"
17784 @@ -62,7 +68,7 @@ r_base = .
17785 inc %ax # protected mode (PE) bit
17786 lmsw %ax # into protected mode
17787 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17788 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17789 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17790
17791 # These need to be in the same 64K segment as the above;
17792 # hence we don't use the boot_gdt_descr defined in head.S
17793 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17794 index 09ff517..df19fbff 100644
17795 --- a/arch/x86/kernel/trampoline_64.S
17796 +++ b/arch/x86/kernel/trampoline_64.S
17797 @@ -90,7 +90,7 @@ startup_32:
17798 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17799 movl %eax, %ds
17800
17801 - movl $X86_CR4_PAE, %eax
17802 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17803 movl %eax, %cr4 # Enable PAE mode
17804
17805 # Setup trampoline 4 level pagetables
17806 @@ -138,7 +138,7 @@ tidt:
17807 # so the kernel can live anywhere
17808 .balign 4
17809 tgdt:
17810 - .short tgdt_end - tgdt # gdt limit
17811 + .short tgdt_end - tgdt - 1 # gdt limit
17812 .long tgdt - r_base
17813 .short 0
17814 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17815 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17816 index a8e3eb8..c9dbd7d 100644
17817 --- a/arch/x86/kernel/traps.c
17818 +++ b/arch/x86/kernel/traps.c
17819 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17820
17821 /* Do we ignore FPU interrupts ? */
17822 char ignore_fpu_irq;
17823 -
17824 -/*
17825 - * The IDT has to be page-aligned to simplify the Pentium
17826 - * F0 0F bug workaround.
17827 - */
17828 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17829 #endif
17830
17831 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17832 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17833 }
17834
17835 static void __kprobes
17836 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17837 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17838 long error_code, siginfo_t *info)
17839 {
17840 struct task_struct *tsk = current;
17841
17842 #ifdef CONFIG_X86_32
17843 - if (regs->flags & X86_VM_MASK) {
17844 + if (v8086_mode(regs)) {
17845 /*
17846 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17847 * On nmi (interrupt 2), do_trap should not be called.
17848 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17849 }
17850 #endif
17851
17852 - if (!user_mode(regs))
17853 + if (!user_mode_novm(regs))
17854 goto kernel_trap;
17855
17856 #ifdef CONFIG_X86_32
17857 @@ -148,7 +142,7 @@ trap_signal:
17858 printk_ratelimit()) {
17859 printk(KERN_INFO
17860 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17861 - tsk->comm, tsk->pid, str,
17862 + tsk->comm, task_pid_nr(tsk), str,
17863 regs->ip, regs->sp, error_code);
17864 print_vma_addr(" in ", regs->ip);
17865 printk("\n");
17866 @@ -165,8 +159,20 @@ kernel_trap:
17867 if (!fixup_exception(regs)) {
17868 tsk->thread.error_code = error_code;
17869 tsk->thread.trap_no = trapnr;
17870 +
17871 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17872 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17873 + str = "PAX: suspicious stack segment fault";
17874 +#endif
17875 +
17876 die(str, regs, error_code);
17877 }
17878 +
17879 +#ifdef CONFIG_PAX_REFCOUNT
17880 + if (trapnr == 4)
17881 + pax_report_refcount_overflow(regs);
17882 +#endif
17883 +
17884 return;
17885
17886 #ifdef CONFIG_X86_32
17887 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17888 conditional_sti(regs);
17889
17890 #ifdef CONFIG_X86_32
17891 - if (regs->flags & X86_VM_MASK)
17892 + if (v8086_mode(regs))
17893 goto gp_in_vm86;
17894 #endif
17895
17896 tsk = current;
17897 - if (!user_mode(regs))
17898 + if (!user_mode_novm(regs))
17899 goto gp_in_kernel;
17900
17901 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17902 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17903 + struct mm_struct *mm = tsk->mm;
17904 + unsigned long limit;
17905 +
17906 + down_write(&mm->mmap_sem);
17907 + limit = mm->context.user_cs_limit;
17908 + if (limit < TASK_SIZE) {
17909 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17910 + up_write(&mm->mmap_sem);
17911 + return;
17912 + }
17913 + up_write(&mm->mmap_sem);
17914 + }
17915 +#endif
17916 +
17917 tsk->thread.error_code = error_code;
17918 tsk->thread.trap_no = 13;
17919
17920 @@ -295,6 +317,13 @@ gp_in_kernel:
17921 if (notify_die(DIE_GPF, "general protection fault", regs,
17922 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17923 return;
17924 +
17925 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17926 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17927 + die("PAX: suspicious general protection fault", regs, error_code);
17928 + else
17929 +#endif
17930 +
17931 die("general protection fault", regs, error_code);
17932 }
17933
17934 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17935 /* It's safe to allow irq's after DR6 has been saved */
17936 preempt_conditional_sti(regs);
17937
17938 - if (regs->flags & X86_VM_MASK) {
17939 + if (v8086_mode(regs)) {
17940 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17941 error_code, 1);
17942 preempt_conditional_cli(regs);
17943 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17944 * We already checked v86 mode above, so we can check for kernel mode
17945 * by just checking the CPL of CS.
17946 */
17947 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17948 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17949 tsk->thread.debugreg6 &= ~DR_STEP;
17950 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17951 regs->flags &= ~X86_EFLAGS_TF;
17952 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17953 return;
17954 conditional_sti(regs);
17955
17956 - if (!user_mode_vm(regs))
17957 + if (!user_mode(regs))
17958 {
17959 if (!fixup_exception(regs)) {
17960 task->thread.error_code = error_code;
17961 @@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17962 void __math_state_restore(void)
17963 {
17964 struct thread_info *thread = current_thread_info();
17965 - struct task_struct *tsk = thread->task;
17966 + struct task_struct *tsk = current;
17967
17968 /*
17969 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17970 @@ -595,8 +624,7 @@ void __math_state_restore(void)
17971 */
17972 asmlinkage void math_state_restore(void)
17973 {
17974 - struct thread_info *thread = current_thread_info();
17975 - struct task_struct *tsk = thread->task;
17976 + struct task_struct *tsk = current;
17977
17978 if (!tsk_used_math(tsk)) {
17979 local_irq_enable();
17980 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17981 index b9242ba..50c5edd 100644
17982 --- a/arch/x86/kernel/verify_cpu.S
17983 +++ b/arch/x86/kernel/verify_cpu.S
17984 @@ -20,6 +20,7 @@
17985 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17986 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17987 * arch/x86/kernel/head_32.S: processor startup
17988 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17989 *
17990 * verify_cpu, returns the status of longmode and SSE in register %eax.
17991 * 0: Success 1: Failure
17992 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17993 index 863f875..4307295 100644
17994 --- a/arch/x86/kernel/vm86_32.c
17995 +++ b/arch/x86/kernel/vm86_32.c
17996 @@ -41,6 +41,7 @@
17997 #include <linux/ptrace.h>
17998 #include <linux/audit.h>
17999 #include <linux/stddef.h>
18000 +#include <linux/grsecurity.h>
18001
18002 #include <asm/uaccess.h>
18003 #include <asm/io.h>
18004 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
18005 do_exit(SIGSEGV);
18006 }
18007
18008 - tss = &per_cpu(init_tss, get_cpu());
18009 + tss = init_tss + get_cpu();
18010 current->thread.sp0 = current->thread.saved_sp0;
18011 current->thread.sysenter_cs = __KERNEL_CS;
18012 load_sp0(tss, &current->thread);
18013 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
18014 struct task_struct *tsk;
18015 int tmp, ret = -EPERM;
18016
18017 +#ifdef CONFIG_GRKERNSEC_VM86
18018 + if (!capable(CAP_SYS_RAWIO)) {
18019 + gr_handle_vm86();
18020 + goto out;
18021 + }
18022 +#endif
18023 +
18024 tsk = current;
18025 if (tsk->thread.saved_sp0)
18026 goto out;
18027 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
18028 int tmp, ret;
18029 struct vm86plus_struct __user *v86;
18030
18031 +#ifdef CONFIG_GRKERNSEC_VM86
18032 + if (!capable(CAP_SYS_RAWIO)) {
18033 + gr_handle_vm86();
18034 + ret = -EPERM;
18035 + goto out;
18036 + }
18037 +#endif
18038 +
18039 tsk = current;
18040 switch (cmd) {
18041 case VM86_REQUEST_IRQ:
18042 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
18043 tsk->thread.saved_fs = info->regs32->fs;
18044 tsk->thread.saved_gs = get_user_gs(info->regs32);
18045
18046 - tss = &per_cpu(init_tss, get_cpu());
18047 + tss = init_tss + get_cpu();
18048 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18049 if (cpu_has_sep)
18050 tsk->thread.sysenter_cs = 0;
18051 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
18052 goto cannot_handle;
18053 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18054 goto cannot_handle;
18055 - intr_ptr = (unsigned long __user *) (i << 2);
18056 + intr_ptr = (__force unsigned long __user *) (i << 2);
18057 if (get_user(segoffs, intr_ptr))
18058 goto cannot_handle;
18059 if ((segoffs >> 16) == BIOSSEG)
18060 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
18061 index 0f703f1..9e15f64 100644
18062 --- a/arch/x86/kernel/vmlinux.lds.S
18063 +++ b/arch/x86/kernel/vmlinux.lds.S
18064 @@ -26,6 +26,13 @@
18065 #include <asm/page_types.h>
18066 #include <asm/cache.h>
18067 #include <asm/boot.h>
18068 +#include <asm/segment.h>
18069 +
18070 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18071 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18072 +#else
18073 +#define __KERNEL_TEXT_OFFSET 0
18074 +#endif
18075
18076 #undef i386 /* in case the preprocessor is a 32bit one */
18077
18078 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
18079
18080 PHDRS {
18081 text PT_LOAD FLAGS(5); /* R_E */
18082 +#ifdef CONFIG_X86_32
18083 + module PT_LOAD FLAGS(5); /* R_E */
18084 +#endif
18085 +#ifdef CONFIG_XEN
18086 + rodata PT_LOAD FLAGS(5); /* R_E */
18087 +#else
18088 + rodata PT_LOAD FLAGS(4); /* R__ */
18089 +#endif
18090 data PT_LOAD FLAGS(6); /* RW_ */
18091 -#ifdef CONFIG_X86_64
18092 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18093 #ifdef CONFIG_SMP
18094 percpu PT_LOAD FLAGS(6); /* RW_ */
18095 #endif
18096 + text.init PT_LOAD FLAGS(5); /* R_E */
18097 + text.exit PT_LOAD FLAGS(5); /* R_E */
18098 init PT_LOAD FLAGS(7); /* RWE */
18099 -#endif
18100 note PT_NOTE FLAGS(0); /* ___ */
18101 }
18102
18103 SECTIONS
18104 {
18105 #ifdef CONFIG_X86_32
18106 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18107 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18108 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18109 #else
18110 - . = __START_KERNEL;
18111 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18112 + . = __START_KERNEL;
18113 #endif
18114
18115 /* Text and read-only data */
18116 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18117 - _text = .;
18118 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18119 /* bootstrapping code */
18120 +#ifdef CONFIG_X86_32
18121 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18122 +#else
18123 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18124 +#endif
18125 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18126 + _text = .;
18127 HEAD_TEXT
18128 #ifdef CONFIG_X86_32
18129 . = ALIGN(PAGE_SIZE);
18130 @@ -108,13 +128,47 @@ SECTIONS
18131 IRQENTRY_TEXT
18132 *(.fixup)
18133 *(.gnu.warning)
18134 - /* End of text section */
18135 - _etext = .;
18136 } :text = 0x9090
18137
18138 - NOTES :text :note
18139 + . += __KERNEL_TEXT_OFFSET;
18140
18141 - EXCEPTION_TABLE(16) :text = 0x9090
18142 +#ifdef CONFIG_X86_32
18143 + . = ALIGN(PAGE_SIZE);
18144 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18145 +
18146 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18147 + MODULES_EXEC_VADDR = .;
18148 + BYTE(0)
18149 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18150 + . = ALIGN(HPAGE_SIZE);
18151 + MODULES_EXEC_END = . - 1;
18152 +#endif
18153 +
18154 + } :module
18155 +#endif
18156 +
18157 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18158 + /* End of text section */
18159 + _etext = . - __KERNEL_TEXT_OFFSET;
18160 + }
18161 +
18162 +#ifdef CONFIG_X86_32
18163 + . = ALIGN(PAGE_SIZE);
18164 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18165 + *(.idt)
18166 + . = ALIGN(PAGE_SIZE);
18167 + *(.empty_zero_page)
18168 + *(.initial_pg_fixmap)
18169 + *(.initial_pg_pmd)
18170 + *(.initial_page_table)
18171 + *(.swapper_pg_dir)
18172 + } :rodata
18173 +#endif
18174 +
18175 + . = ALIGN(PAGE_SIZE);
18176 + NOTES :rodata :note
18177 +
18178 + EXCEPTION_TABLE(16) :rodata
18179
18180 #if defined(CONFIG_DEBUG_RODATA)
18181 /* .text should occupy whole number of pages */
18182 @@ -126,16 +180,20 @@ SECTIONS
18183
18184 /* Data */
18185 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18186 +
18187 +#ifdef CONFIG_PAX_KERNEXEC
18188 + . = ALIGN(HPAGE_SIZE);
18189 +#else
18190 + . = ALIGN(PAGE_SIZE);
18191 +#endif
18192 +
18193 /* Start of data section */
18194 _sdata = .;
18195
18196 /* init_task */
18197 INIT_TASK_DATA(THREAD_SIZE)
18198
18199 -#ifdef CONFIG_X86_32
18200 - /* 32 bit has nosave before _edata */
18201 NOSAVE_DATA
18202 -#endif
18203
18204 PAGE_ALIGNED_DATA(PAGE_SIZE)
18205
18206 @@ -176,12 +234,19 @@ SECTIONS
18207 #endif /* CONFIG_X86_64 */
18208
18209 /* Init code and data - will be freed after init */
18210 - . = ALIGN(PAGE_SIZE);
18211 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18212 + BYTE(0)
18213 +
18214 +#ifdef CONFIG_PAX_KERNEXEC
18215 + . = ALIGN(HPAGE_SIZE);
18216 +#else
18217 + . = ALIGN(PAGE_SIZE);
18218 +#endif
18219 +
18220 __init_begin = .; /* paired with __init_end */
18221 - }
18222 + } :init.begin
18223
18224 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18225 +#ifdef CONFIG_SMP
18226 /*
18227 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18228 * output PHDR, so the next output section - .init.text - should
18229 @@ -190,12 +255,27 @@ SECTIONS
18230 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
18231 #endif
18232
18233 - INIT_TEXT_SECTION(PAGE_SIZE)
18234 -#ifdef CONFIG_X86_64
18235 - :init
18236 -#endif
18237 + . = ALIGN(PAGE_SIZE);
18238 + init_begin = .;
18239 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18240 + VMLINUX_SYMBOL(_sinittext) = .;
18241 + INIT_TEXT
18242 + VMLINUX_SYMBOL(_einittext) = .;
18243 + . = ALIGN(PAGE_SIZE);
18244 + } :text.init
18245
18246 - INIT_DATA_SECTION(16)
18247 + /*
18248 + * .exit.text is discard at runtime, not link time, to deal with
18249 + * references from .altinstructions and .eh_frame
18250 + */
18251 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18252 + EXIT_TEXT
18253 + . = ALIGN(16);
18254 + } :text.exit
18255 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18256 +
18257 + . = ALIGN(PAGE_SIZE);
18258 + INIT_DATA_SECTION(16) :init
18259
18260 /*
18261 * Code and data for a variety of lowlevel trampolines, to be
18262 @@ -269,19 +349,12 @@ SECTIONS
18263 }
18264
18265 . = ALIGN(8);
18266 - /*
18267 - * .exit.text is discard at runtime, not link time, to deal with
18268 - * references from .altinstructions and .eh_frame
18269 - */
18270 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18271 - EXIT_TEXT
18272 - }
18273
18274 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18275 EXIT_DATA
18276 }
18277
18278 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18279 +#ifndef CONFIG_SMP
18280 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18281 #endif
18282
18283 @@ -300,16 +373,10 @@ SECTIONS
18284 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18285 __smp_locks = .;
18286 *(.smp_locks)
18287 - . = ALIGN(PAGE_SIZE);
18288 __smp_locks_end = .;
18289 + . = ALIGN(PAGE_SIZE);
18290 }
18291
18292 -#ifdef CONFIG_X86_64
18293 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18294 - NOSAVE_DATA
18295 - }
18296 -#endif
18297 -
18298 /* BSS */
18299 . = ALIGN(PAGE_SIZE);
18300 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18301 @@ -325,6 +392,7 @@ SECTIONS
18302 __brk_base = .;
18303 . += 64 * 1024; /* 64k alignment slop space */
18304 *(.brk_reservation) /* areas brk users have reserved */
18305 + . = ALIGN(HPAGE_SIZE);
18306 __brk_limit = .;
18307 }
18308
18309 @@ -351,13 +419,12 @@ SECTIONS
18310 * for the boot processor.
18311 */
18312 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18313 -INIT_PER_CPU(gdt_page);
18314 INIT_PER_CPU(irq_stack_union);
18315
18316 /*
18317 * Build-time check on the image size:
18318 */
18319 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18320 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18321 "kernel image bigger than KERNEL_IMAGE_SIZE");
18322
18323 #ifdef CONFIG_SMP
18324 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18325 index e4d4a22..47ee71f 100644
18326 --- a/arch/x86/kernel/vsyscall_64.c
18327 +++ b/arch/x86/kernel/vsyscall_64.c
18328 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18329 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18330 };
18331
18332 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18333 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18334
18335 static int __init vsyscall_setup(char *str)
18336 {
18337 if (str) {
18338 if (!strcmp("emulate", str))
18339 vsyscall_mode = EMULATE;
18340 - else if (!strcmp("native", str))
18341 - vsyscall_mode = NATIVE;
18342 else if (!strcmp("none", str))
18343 vsyscall_mode = NONE;
18344 else
18345 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18346
18347 tsk = current;
18348 if (seccomp_mode(&tsk->seccomp))
18349 - do_exit(SIGKILL);
18350 + do_group_exit(SIGKILL);
18351
18352 switch (vsyscall_nr) {
18353 case 0:
18354 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18355 return true;
18356
18357 sigsegv:
18358 - force_sig(SIGSEGV, current);
18359 - return true;
18360 + do_group_exit(SIGKILL);
18361 }
18362
18363 /*
18364 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18365 extern char __vvar_page;
18366 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18367
18368 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18369 - vsyscall_mode == NATIVE
18370 - ? PAGE_KERNEL_VSYSCALL
18371 - : PAGE_KERNEL_VVAR);
18372 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18373 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18374 (unsigned long)VSYSCALL_START);
18375
18376 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18377 index 9796c2f..f686fbf 100644
18378 --- a/arch/x86/kernel/x8664_ksyms_64.c
18379 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18380 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18381 EXPORT_SYMBOL(copy_user_generic_string);
18382 EXPORT_SYMBOL(copy_user_generic_unrolled);
18383 EXPORT_SYMBOL(__copy_user_nocache);
18384 -EXPORT_SYMBOL(_copy_from_user);
18385 -EXPORT_SYMBOL(_copy_to_user);
18386
18387 EXPORT_SYMBOL(copy_page);
18388 EXPORT_SYMBOL(clear_page);
18389 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18390 index a391134..d0b63b6e 100644
18391 --- a/arch/x86/kernel/xsave.c
18392 +++ b/arch/x86/kernel/xsave.c
18393 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18394 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18395 return -EINVAL;
18396
18397 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18398 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18399 fx_sw_user->extended_size -
18400 FP_XSTATE_MAGIC2_SIZE));
18401 if (err)
18402 @@ -267,7 +267,7 @@ fx_only:
18403 * the other extended state.
18404 */
18405 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18406 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18407 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18408 }
18409
18410 /*
18411 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18412 if (use_xsave())
18413 err = restore_user_xstate(buf);
18414 else
18415 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18416 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18417 buf);
18418 if (unlikely(err)) {
18419 /*
18420 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18421 index f1e3be1..588efc8 100644
18422 --- a/arch/x86/kvm/emulate.c
18423 +++ b/arch/x86/kvm/emulate.c
18424 @@ -249,6 +249,7 @@ struct gprefix {
18425
18426 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18427 do { \
18428 + unsigned long _tmp; \
18429 __asm__ __volatile__ ( \
18430 _PRE_EFLAGS("0", "4", "2") \
18431 _op _suffix " %"_x"3,%1; " \
18432 @@ -263,8 +264,6 @@ struct gprefix {
18433 /* Raw emulation: instruction has two explicit operands. */
18434 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18435 do { \
18436 - unsigned long _tmp; \
18437 - \
18438 switch ((ctxt)->dst.bytes) { \
18439 case 2: \
18440 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18441 @@ -280,7 +279,6 @@ struct gprefix {
18442
18443 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18444 do { \
18445 - unsigned long _tmp; \
18446 switch ((ctxt)->dst.bytes) { \
18447 case 1: \
18448 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18449 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18450 index 54abb40..a192606 100644
18451 --- a/arch/x86/kvm/lapic.c
18452 +++ b/arch/x86/kvm/lapic.c
18453 @@ -53,7 +53,7 @@
18454 #define APIC_BUS_CYCLE_NS 1
18455
18456 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18457 -#define apic_debug(fmt, arg...)
18458 +#define apic_debug(fmt, arg...) do {} while (0)
18459
18460 #define APIC_LVT_NUM 6
18461 /* 14 is the version for Xeon and Pentium 8.4.8*/
18462 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18463 index f1b36cf..af8a124 100644
18464 --- a/arch/x86/kvm/mmu.c
18465 +++ b/arch/x86/kvm/mmu.c
18466 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18467
18468 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18469
18470 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18471 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18472
18473 /*
18474 * Assume that the pte write on a page table of the same type
18475 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18476 }
18477
18478 spin_lock(&vcpu->kvm->mmu_lock);
18479 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18480 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18481 gentry = 0;
18482 kvm_mmu_free_some_pages(vcpu);
18483 ++vcpu->kvm->stat.mmu_pte_write;
18484 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18485 index 9299410..ade2f9b 100644
18486 --- a/arch/x86/kvm/paging_tmpl.h
18487 +++ b/arch/x86/kvm/paging_tmpl.h
18488 @@ -197,7 +197,7 @@ retry_walk:
18489 if (unlikely(kvm_is_error_hva(host_addr)))
18490 goto error;
18491
18492 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18493 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18494 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18495 goto error;
18496
18497 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18498 if (need_flush)
18499 kvm_flush_remote_tlbs(vcpu->kvm);
18500
18501 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18502 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18503
18504 spin_unlock(&vcpu->kvm->mmu_lock);
18505
18506 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18507 index e32243e..a6e6172 100644
18508 --- a/arch/x86/kvm/svm.c
18509 +++ b/arch/x86/kvm/svm.c
18510 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18511 int cpu = raw_smp_processor_id();
18512
18513 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18514 +
18515 + pax_open_kernel();
18516 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18517 + pax_close_kernel();
18518 +
18519 load_TR_desc();
18520 }
18521
18522 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18523 #endif
18524 #endif
18525
18526 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18527 + __set_fs(current_thread_info()->addr_limit);
18528 +#endif
18529 +
18530 reload_tss(vcpu);
18531
18532 local_irq_disable();
18533 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18534 index 579a0b5..ed7bbf9 100644
18535 --- a/arch/x86/kvm/vmx.c
18536 +++ b/arch/x86/kvm/vmx.c
18537 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18538 struct desc_struct *descs;
18539
18540 descs = (void *)gdt->address;
18541 +
18542 + pax_open_kernel();
18543 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18544 + pax_close_kernel();
18545 +
18546 load_TR_desc();
18547 }
18548
18549 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18550 if (!cpu_has_vmx_flexpriority())
18551 flexpriority_enabled = 0;
18552
18553 - if (!cpu_has_vmx_tpr_shadow())
18554 - kvm_x86_ops->update_cr8_intercept = NULL;
18555 + if (!cpu_has_vmx_tpr_shadow()) {
18556 + pax_open_kernel();
18557 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18558 + pax_close_kernel();
18559 + }
18560
18561 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18562 kvm_disable_largepages();
18563 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18564 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18565
18566 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18567 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18568 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18569
18570 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18571 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18572 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18573 "jmp .Lkvm_vmx_return \n\t"
18574 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18575 ".Lkvm_vmx_return: "
18576 +
18577 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18578 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18579 + ".Lkvm_vmx_return2: "
18580 +#endif
18581 +
18582 /* Save guest registers, load host registers, keep flags */
18583 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18584 "pop %0 \n\t"
18585 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18586 #endif
18587 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18588 [wordsize]"i"(sizeof(ulong))
18589 +
18590 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18591 + ,[cs]"i"(__KERNEL_CS)
18592 +#endif
18593 +
18594 : "cc", "memory"
18595 , R"ax", R"bx", R"di", R"si"
18596 #ifdef CONFIG_X86_64
18597 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18598 }
18599 }
18600
18601 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18602 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18603 +
18604 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18605 + loadsegment(fs, __KERNEL_PERCPU);
18606 +#endif
18607 +
18608 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18609 + __set_fs(current_thread_info()->addr_limit);
18610 +#endif
18611 +
18612 vmx->loaded_vmcs->launched = 1;
18613
18614 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18615 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18616 index 4c938da..4ddef65 100644
18617 --- a/arch/x86/kvm/x86.c
18618 +++ b/arch/x86/kvm/x86.c
18619 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18620 {
18621 struct kvm *kvm = vcpu->kvm;
18622 int lm = is_long_mode(vcpu);
18623 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18624 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18625 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18626 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18627 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18628 : kvm->arch.xen_hvm_config.blob_size_32;
18629 u32 page_num = data & ~PAGE_MASK;
18630 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18631 if (n < msr_list.nmsrs)
18632 goto out;
18633 r = -EFAULT;
18634 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18635 + goto out;
18636 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18637 num_msrs_to_save * sizeof(u32)))
18638 goto out;
18639 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18640 struct kvm_cpuid2 *cpuid,
18641 struct kvm_cpuid_entry2 __user *entries)
18642 {
18643 - int r;
18644 + int r, i;
18645
18646 r = -E2BIG;
18647 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18648 goto out;
18649 r = -EFAULT;
18650 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18651 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18652 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18653 goto out;
18654 + for (i = 0; i < cpuid->nent; ++i) {
18655 + struct kvm_cpuid_entry2 cpuid_entry;
18656 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18657 + goto out;
18658 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18659 + }
18660 vcpu->arch.cpuid_nent = cpuid->nent;
18661 kvm_apic_set_version(vcpu);
18662 kvm_x86_ops->cpuid_update(vcpu);
18663 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18664 struct kvm_cpuid2 *cpuid,
18665 struct kvm_cpuid_entry2 __user *entries)
18666 {
18667 - int r;
18668 + int r, i;
18669
18670 r = -E2BIG;
18671 if (cpuid->nent < vcpu->arch.cpuid_nent)
18672 goto out;
18673 r = -EFAULT;
18674 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18675 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18676 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18677 goto out;
18678 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18679 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18680 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18681 + goto out;
18682 + }
18683 return 0;
18684
18685 out:
18686 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18687 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18688 struct kvm_interrupt *irq)
18689 {
18690 - if (irq->irq < 0 || irq->irq >= 256)
18691 + if (irq->irq >= 256)
18692 return -EINVAL;
18693 if (irqchip_in_kernel(vcpu->kvm))
18694 return -ENXIO;
18695 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18696 kvm_mmu_set_mmio_spte_mask(mask);
18697 }
18698
18699 -int kvm_arch_init(void *opaque)
18700 +int kvm_arch_init(const void *opaque)
18701 {
18702 int r;
18703 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18704 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18705 index cf4603b..7cdde38 100644
18706 --- a/arch/x86/lguest/boot.c
18707 +++ b/arch/x86/lguest/boot.c
18708 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18709 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18710 * Launcher to reboot us.
18711 */
18712 -static void lguest_restart(char *reason)
18713 +static __noreturn void lguest_restart(char *reason)
18714 {
18715 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18716 + BUG();
18717 }
18718
18719 /*G:050
18720 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18721 index 042f682..c92afb6 100644
18722 --- a/arch/x86/lib/atomic64_32.c
18723 +++ b/arch/x86/lib/atomic64_32.c
18724 @@ -8,18 +8,30 @@
18725
18726 long long atomic64_read_cx8(long long, const atomic64_t *v);
18727 EXPORT_SYMBOL(atomic64_read_cx8);
18728 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18729 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18730 long long atomic64_set_cx8(long long, const atomic64_t *v);
18731 EXPORT_SYMBOL(atomic64_set_cx8);
18732 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18733 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18734 long long atomic64_xchg_cx8(long long, unsigned high);
18735 EXPORT_SYMBOL(atomic64_xchg_cx8);
18736 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18737 EXPORT_SYMBOL(atomic64_add_return_cx8);
18738 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18739 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18740 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18741 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18742 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18743 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18744 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18745 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18746 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18747 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18748 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18749 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18750 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18751 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18752 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18753 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18754 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18755 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18756 #ifndef CONFIG_X86_CMPXCHG64
18757 long long atomic64_read_386(long long, const atomic64_t *v);
18758 EXPORT_SYMBOL(atomic64_read_386);
18759 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18760 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18761 long long atomic64_set_386(long long, const atomic64_t *v);
18762 EXPORT_SYMBOL(atomic64_set_386);
18763 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18764 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18765 long long atomic64_xchg_386(long long, unsigned high);
18766 EXPORT_SYMBOL(atomic64_xchg_386);
18767 long long atomic64_add_return_386(long long a, atomic64_t *v);
18768 EXPORT_SYMBOL(atomic64_add_return_386);
18769 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18770 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18771 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18772 EXPORT_SYMBOL(atomic64_sub_return_386);
18773 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18774 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18775 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18776 EXPORT_SYMBOL(atomic64_inc_return_386);
18777 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18778 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18779 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18780 EXPORT_SYMBOL(atomic64_dec_return_386);
18781 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18782 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18783 long long atomic64_add_386(long long a, atomic64_t *v);
18784 EXPORT_SYMBOL(atomic64_add_386);
18785 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18786 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18787 long long atomic64_sub_386(long long a, atomic64_t *v);
18788 EXPORT_SYMBOL(atomic64_sub_386);
18789 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18790 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18791 long long atomic64_inc_386(long long a, atomic64_t *v);
18792 EXPORT_SYMBOL(atomic64_inc_386);
18793 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18794 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18795 long long atomic64_dec_386(long long a, atomic64_t *v);
18796 EXPORT_SYMBOL(atomic64_dec_386);
18797 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18798 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18799 long long atomic64_dec_if_positive_386(atomic64_t *v);
18800 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18801 int atomic64_inc_not_zero_386(atomic64_t *v);
18802 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18803 index e8e7e0d..56fd1b0 100644
18804 --- a/arch/x86/lib/atomic64_386_32.S
18805 +++ b/arch/x86/lib/atomic64_386_32.S
18806 @@ -48,6 +48,10 @@ BEGIN(read)
18807 movl (v), %eax
18808 movl 4(v), %edx
18809 RET_ENDP
18810 +BEGIN(read_unchecked)
18811 + movl (v), %eax
18812 + movl 4(v), %edx
18813 +RET_ENDP
18814 #undef v
18815
18816 #define v %esi
18817 @@ -55,6 +59,10 @@ BEGIN(set)
18818 movl %ebx, (v)
18819 movl %ecx, 4(v)
18820 RET_ENDP
18821 +BEGIN(set_unchecked)
18822 + movl %ebx, (v)
18823 + movl %ecx, 4(v)
18824 +RET_ENDP
18825 #undef v
18826
18827 #define v %esi
18828 @@ -70,6 +78,20 @@ RET_ENDP
18829 BEGIN(add)
18830 addl %eax, (v)
18831 adcl %edx, 4(v)
18832 +
18833 +#ifdef CONFIG_PAX_REFCOUNT
18834 + jno 0f
18835 + subl %eax, (v)
18836 + sbbl %edx, 4(v)
18837 + int $4
18838 +0:
18839 + _ASM_EXTABLE(0b, 0b)
18840 +#endif
18841 +
18842 +RET_ENDP
18843 +BEGIN(add_unchecked)
18844 + addl %eax, (v)
18845 + adcl %edx, 4(v)
18846 RET_ENDP
18847 #undef v
18848
18849 @@ -77,6 +99,24 @@ RET_ENDP
18850 BEGIN(add_return)
18851 addl (v), %eax
18852 adcl 4(v), %edx
18853 +
18854 +#ifdef CONFIG_PAX_REFCOUNT
18855 + into
18856 +1234:
18857 + _ASM_EXTABLE(1234b, 2f)
18858 +#endif
18859 +
18860 + movl %eax, (v)
18861 + movl %edx, 4(v)
18862 +
18863 +#ifdef CONFIG_PAX_REFCOUNT
18864 +2:
18865 +#endif
18866 +
18867 +RET_ENDP
18868 +BEGIN(add_return_unchecked)
18869 + addl (v), %eax
18870 + adcl 4(v), %edx
18871 movl %eax, (v)
18872 movl %edx, 4(v)
18873 RET_ENDP
18874 @@ -86,6 +126,20 @@ RET_ENDP
18875 BEGIN(sub)
18876 subl %eax, (v)
18877 sbbl %edx, 4(v)
18878 +
18879 +#ifdef CONFIG_PAX_REFCOUNT
18880 + jno 0f
18881 + addl %eax, (v)
18882 + adcl %edx, 4(v)
18883 + int $4
18884 +0:
18885 + _ASM_EXTABLE(0b, 0b)
18886 +#endif
18887 +
18888 +RET_ENDP
18889 +BEGIN(sub_unchecked)
18890 + subl %eax, (v)
18891 + sbbl %edx, 4(v)
18892 RET_ENDP
18893 #undef v
18894
18895 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18896 sbbl $0, %edx
18897 addl (v), %eax
18898 adcl 4(v), %edx
18899 +
18900 +#ifdef CONFIG_PAX_REFCOUNT
18901 + into
18902 +1234:
18903 + _ASM_EXTABLE(1234b, 2f)
18904 +#endif
18905 +
18906 + movl %eax, (v)
18907 + movl %edx, 4(v)
18908 +
18909 +#ifdef CONFIG_PAX_REFCOUNT
18910 +2:
18911 +#endif
18912 +
18913 +RET_ENDP
18914 +BEGIN(sub_return_unchecked)
18915 + negl %edx
18916 + negl %eax
18917 + sbbl $0, %edx
18918 + addl (v), %eax
18919 + adcl 4(v), %edx
18920 movl %eax, (v)
18921 movl %edx, 4(v)
18922 RET_ENDP
18923 @@ -105,6 +180,20 @@ RET_ENDP
18924 BEGIN(inc)
18925 addl $1, (v)
18926 adcl $0, 4(v)
18927 +
18928 +#ifdef CONFIG_PAX_REFCOUNT
18929 + jno 0f
18930 + subl $1, (v)
18931 + sbbl $0, 4(v)
18932 + int $4
18933 +0:
18934 + _ASM_EXTABLE(0b, 0b)
18935 +#endif
18936 +
18937 +RET_ENDP
18938 +BEGIN(inc_unchecked)
18939 + addl $1, (v)
18940 + adcl $0, 4(v)
18941 RET_ENDP
18942 #undef v
18943
18944 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18945 movl 4(v), %edx
18946 addl $1, %eax
18947 adcl $0, %edx
18948 +
18949 +#ifdef CONFIG_PAX_REFCOUNT
18950 + into
18951 +1234:
18952 + _ASM_EXTABLE(1234b, 2f)
18953 +#endif
18954 +
18955 + movl %eax, (v)
18956 + movl %edx, 4(v)
18957 +
18958 +#ifdef CONFIG_PAX_REFCOUNT
18959 +2:
18960 +#endif
18961 +
18962 +RET_ENDP
18963 +BEGIN(inc_return_unchecked)
18964 + movl (v), %eax
18965 + movl 4(v), %edx
18966 + addl $1, %eax
18967 + adcl $0, %edx
18968 movl %eax, (v)
18969 movl %edx, 4(v)
18970 RET_ENDP
18971 @@ -123,6 +232,20 @@ RET_ENDP
18972 BEGIN(dec)
18973 subl $1, (v)
18974 sbbl $0, 4(v)
18975 +
18976 +#ifdef CONFIG_PAX_REFCOUNT
18977 + jno 0f
18978 + addl $1, (v)
18979 + adcl $0, 4(v)
18980 + int $4
18981 +0:
18982 + _ASM_EXTABLE(0b, 0b)
18983 +#endif
18984 +
18985 +RET_ENDP
18986 +BEGIN(dec_unchecked)
18987 + subl $1, (v)
18988 + sbbl $0, 4(v)
18989 RET_ENDP
18990 #undef v
18991
18992 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18993 movl 4(v), %edx
18994 subl $1, %eax
18995 sbbl $0, %edx
18996 +
18997 +#ifdef CONFIG_PAX_REFCOUNT
18998 + into
18999 +1234:
19000 + _ASM_EXTABLE(1234b, 2f)
19001 +#endif
19002 +
19003 + movl %eax, (v)
19004 + movl %edx, 4(v)
19005 +
19006 +#ifdef CONFIG_PAX_REFCOUNT
19007 +2:
19008 +#endif
19009 +
19010 +RET_ENDP
19011 +BEGIN(dec_return_unchecked)
19012 + movl (v), %eax
19013 + movl 4(v), %edx
19014 + subl $1, %eax
19015 + sbbl $0, %edx
19016 movl %eax, (v)
19017 movl %edx, 4(v)
19018 RET_ENDP
19019 @@ -143,6 +286,13 @@ BEGIN(add_unless)
19020 adcl %edx, %edi
19021 addl (v), %eax
19022 adcl 4(v), %edx
19023 +
19024 +#ifdef CONFIG_PAX_REFCOUNT
19025 + into
19026 +1234:
19027 + _ASM_EXTABLE(1234b, 2f)
19028 +#endif
19029 +
19030 cmpl %eax, %esi
19031 je 3f
19032 1:
19033 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
19034 1:
19035 addl $1, %eax
19036 adcl $0, %edx
19037 +
19038 +#ifdef CONFIG_PAX_REFCOUNT
19039 + into
19040 +1234:
19041 + _ASM_EXTABLE(1234b, 2f)
19042 +#endif
19043 +
19044 movl %eax, (v)
19045 movl %edx, 4(v)
19046 movl $1, %eax
19047 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
19048 movl 4(v), %edx
19049 subl $1, %eax
19050 sbbl $0, %edx
19051 +
19052 +#ifdef CONFIG_PAX_REFCOUNT
19053 + into
19054 +1234:
19055 + _ASM_EXTABLE(1234b, 1f)
19056 +#endif
19057 +
19058 js 1f
19059 movl %eax, (v)
19060 movl %edx, 4(v)
19061 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
19062 index 391a083..d658e9f 100644
19063 --- a/arch/x86/lib/atomic64_cx8_32.S
19064 +++ b/arch/x86/lib/atomic64_cx8_32.S
19065 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
19066 CFI_STARTPROC
19067
19068 read64 %ecx
19069 + pax_force_retaddr
19070 ret
19071 CFI_ENDPROC
19072 ENDPROC(atomic64_read_cx8)
19073
19074 +ENTRY(atomic64_read_unchecked_cx8)
19075 + CFI_STARTPROC
19076 +
19077 + read64 %ecx
19078 + pax_force_retaddr
19079 + ret
19080 + CFI_ENDPROC
19081 +ENDPROC(atomic64_read_unchecked_cx8)
19082 +
19083 ENTRY(atomic64_set_cx8)
19084 CFI_STARTPROC
19085
19086 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
19087 cmpxchg8b (%esi)
19088 jne 1b
19089
19090 + pax_force_retaddr
19091 ret
19092 CFI_ENDPROC
19093 ENDPROC(atomic64_set_cx8)
19094
19095 +ENTRY(atomic64_set_unchecked_cx8)
19096 + CFI_STARTPROC
19097 +
19098 +1:
19099 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
19100 + * are atomic on 586 and newer */
19101 + cmpxchg8b (%esi)
19102 + jne 1b
19103 +
19104 + pax_force_retaddr
19105 + ret
19106 + CFI_ENDPROC
19107 +ENDPROC(atomic64_set_unchecked_cx8)
19108 +
19109 ENTRY(atomic64_xchg_cx8)
19110 CFI_STARTPROC
19111
19112 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
19113 cmpxchg8b (%esi)
19114 jne 1b
19115
19116 + pax_force_retaddr
19117 ret
19118 CFI_ENDPROC
19119 ENDPROC(atomic64_xchg_cx8)
19120
19121 -.macro addsub_return func ins insc
19122 -ENTRY(atomic64_\func\()_return_cx8)
19123 +.macro addsub_return func ins insc unchecked=""
19124 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19125 CFI_STARTPROC
19126 SAVE ebp
19127 SAVE ebx
19128 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
19129 movl %edx, %ecx
19130 \ins\()l %esi, %ebx
19131 \insc\()l %edi, %ecx
19132 +
19133 +.ifb \unchecked
19134 +#ifdef CONFIG_PAX_REFCOUNT
19135 + into
19136 +2:
19137 + _ASM_EXTABLE(2b, 3f)
19138 +#endif
19139 +.endif
19140 +
19141 LOCK_PREFIX
19142 cmpxchg8b (%ebp)
19143 jne 1b
19144 -
19145 -10:
19146 movl %ebx, %eax
19147 movl %ecx, %edx
19148 +
19149 +.ifb \unchecked
19150 +#ifdef CONFIG_PAX_REFCOUNT
19151 +3:
19152 +#endif
19153 +.endif
19154 +
19155 RESTORE edi
19156 RESTORE esi
19157 RESTORE ebx
19158 RESTORE ebp
19159 + pax_force_retaddr
19160 ret
19161 CFI_ENDPROC
19162 -ENDPROC(atomic64_\func\()_return_cx8)
19163 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19164 .endm
19165
19166 addsub_return add add adc
19167 addsub_return sub sub sbb
19168 +addsub_return add add adc _unchecked
19169 +addsub_return sub sub sbb _unchecked
19170
19171 -.macro incdec_return func ins insc
19172 -ENTRY(atomic64_\func\()_return_cx8)
19173 +.macro incdec_return func ins insc unchecked
19174 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19175 CFI_STARTPROC
19176 SAVE ebx
19177
19178 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
19179 movl %edx, %ecx
19180 \ins\()l $1, %ebx
19181 \insc\()l $0, %ecx
19182 +
19183 +.ifb \unchecked
19184 +#ifdef CONFIG_PAX_REFCOUNT
19185 + into
19186 +2:
19187 + _ASM_EXTABLE(2b, 3f)
19188 +#endif
19189 +.endif
19190 +
19191 LOCK_PREFIX
19192 cmpxchg8b (%esi)
19193 jne 1b
19194
19195 -10:
19196 movl %ebx, %eax
19197 movl %ecx, %edx
19198 +
19199 +.ifb \unchecked
19200 +#ifdef CONFIG_PAX_REFCOUNT
19201 +3:
19202 +#endif
19203 +.endif
19204 +
19205 RESTORE ebx
19206 + pax_force_retaddr
19207 ret
19208 CFI_ENDPROC
19209 -ENDPROC(atomic64_\func\()_return_cx8)
19210 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19211 .endm
19212
19213 incdec_return inc add adc
19214 incdec_return dec sub sbb
19215 +incdec_return inc add adc _unchecked
19216 +incdec_return dec sub sbb _unchecked
19217
19218 ENTRY(atomic64_dec_if_positive_cx8)
19219 CFI_STARTPROC
19220 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
19221 movl %edx, %ecx
19222 subl $1, %ebx
19223 sbb $0, %ecx
19224 +
19225 +#ifdef CONFIG_PAX_REFCOUNT
19226 + into
19227 +1234:
19228 + _ASM_EXTABLE(1234b, 2f)
19229 +#endif
19230 +
19231 js 2f
19232 LOCK_PREFIX
19233 cmpxchg8b (%esi)
19234 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
19235 movl %ebx, %eax
19236 movl %ecx, %edx
19237 RESTORE ebx
19238 + pax_force_retaddr
19239 ret
19240 CFI_ENDPROC
19241 ENDPROC(atomic64_dec_if_positive_cx8)
19242 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19243 movl %edx, %ecx
19244 addl %esi, %ebx
19245 adcl %edi, %ecx
19246 +
19247 +#ifdef CONFIG_PAX_REFCOUNT
19248 + into
19249 +1234:
19250 + _ASM_EXTABLE(1234b, 3f)
19251 +#endif
19252 +
19253 LOCK_PREFIX
19254 cmpxchg8b (%ebp)
19255 jne 1b
19256 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19257 CFI_ADJUST_CFA_OFFSET -8
19258 RESTORE ebx
19259 RESTORE ebp
19260 + pax_force_retaddr
19261 ret
19262 4:
19263 cmpl %edx, 4(%esp)
19264 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19265 movl %edx, %ecx
19266 addl $1, %ebx
19267 adcl $0, %ecx
19268 +
19269 +#ifdef CONFIG_PAX_REFCOUNT
19270 + into
19271 +1234:
19272 + _ASM_EXTABLE(1234b, 3f)
19273 +#endif
19274 +
19275 LOCK_PREFIX
19276 cmpxchg8b (%esi)
19277 jne 1b
19278 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19279 movl $1, %eax
19280 3:
19281 RESTORE ebx
19282 + pax_force_retaddr
19283 ret
19284 4:
19285 testl %edx, %edx
19286 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19287 index 78d16a5..fbcf666 100644
19288 --- a/arch/x86/lib/checksum_32.S
19289 +++ b/arch/x86/lib/checksum_32.S
19290 @@ -28,7 +28,8 @@
19291 #include <linux/linkage.h>
19292 #include <asm/dwarf2.h>
19293 #include <asm/errno.h>
19294 -
19295 +#include <asm/segment.h>
19296 +
19297 /*
19298 * computes a partial checksum, e.g. for TCP/UDP fragments
19299 */
19300 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19301
19302 #define ARGBASE 16
19303 #define FP 12
19304 -
19305 -ENTRY(csum_partial_copy_generic)
19306 +
19307 +ENTRY(csum_partial_copy_generic_to_user)
19308 CFI_STARTPROC
19309 +
19310 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19311 + pushl_cfi %gs
19312 + popl_cfi %es
19313 + jmp csum_partial_copy_generic
19314 +#endif
19315 +
19316 +ENTRY(csum_partial_copy_generic_from_user)
19317 +
19318 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19319 + pushl_cfi %gs
19320 + popl_cfi %ds
19321 +#endif
19322 +
19323 +ENTRY(csum_partial_copy_generic)
19324 subl $4,%esp
19325 CFI_ADJUST_CFA_OFFSET 4
19326 pushl_cfi %edi
19327 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19328 jmp 4f
19329 SRC(1: movw (%esi), %bx )
19330 addl $2, %esi
19331 -DST( movw %bx, (%edi) )
19332 +DST( movw %bx, %es:(%edi) )
19333 addl $2, %edi
19334 addw %bx, %ax
19335 adcl $0, %eax
19336 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19337 SRC(1: movl (%esi), %ebx )
19338 SRC( movl 4(%esi), %edx )
19339 adcl %ebx, %eax
19340 -DST( movl %ebx, (%edi) )
19341 +DST( movl %ebx, %es:(%edi) )
19342 adcl %edx, %eax
19343 -DST( movl %edx, 4(%edi) )
19344 +DST( movl %edx, %es:4(%edi) )
19345
19346 SRC( movl 8(%esi), %ebx )
19347 SRC( movl 12(%esi), %edx )
19348 adcl %ebx, %eax
19349 -DST( movl %ebx, 8(%edi) )
19350 +DST( movl %ebx, %es:8(%edi) )
19351 adcl %edx, %eax
19352 -DST( movl %edx, 12(%edi) )
19353 +DST( movl %edx, %es:12(%edi) )
19354
19355 SRC( movl 16(%esi), %ebx )
19356 SRC( movl 20(%esi), %edx )
19357 adcl %ebx, %eax
19358 -DST( movl %ebx, 16(%edi) )
19359 +DST( movl %ebx, %es:16(%edi) )
19360 adcl %edx, %eax
19361 -DST( movl %edx, 20(%edi) )
19362 +DST( movl %edx, %es:20(%edi) )
19363
19364 SRC( movl 24(%esi), %ebx )
19365 SRC( movl 28(%esi), %edx )
19366 adcl %ebx, %eax
19367 -DST( movl %ebx, 24(%edi) )
19368 +DST( movl %ebx, %es:24(%edi) )
19369 adcl %edx, %eax
19370 -DST( movl %edx, 28(%edi) )
19371 +DST( movl %edx, %es:28(%edi) )
19372
19373 lea 32(%esi), %esi
19374 lea 32(%edi), %edi
19375 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19376 shrl $2, %edx # This clears CF
19377 SRC(3: movl (%esi), %ebx )
19378 adcl %ebx, %eax
19379 -DST( movl %ebx, (%edi) )
19380 +DST( movl %ebx, %es:(%edi) )
19381 lea 4(%esi), %esi
19382 lea 4(%edi), %edi
19383 dec %edx
19384 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19385 jb 5f
19386 SRC( movw (%esi), %cx )
19387 leal 2(%esi), %esi
19388 -DST( movw %cx, (%edi) )
19389 +DST( movw %cx, %es:(%edi) )
19390 leal 2(%edi), %edi
19391 je 6f
19392 shll $16,%ecx
19393 SRC(5: movb (%esi), %cl )
19394 -DST( movb %cl, (%edi) )
19395 +DST( movb %cl, %es:(%edi) )
19396 6: addl %ecx, %eax
19397 adcl $0, %eax
19398 7:
19399 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19400
19401 6001:
19402 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19403 - movl $-EFAULT, (%ebx)
19404 + movl $-EFAULT, %ss:(%ebx)
19405
19406 # zero the complete destination - computing the rest
19407 # is too much work
19408 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19409
19410 6002:
19411 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19412 - movl $-EFAULT,(%ebx)
19413 + movl $-EFAULT,%ss:(%ebx)
19414 jmp 5000b
19415
19416 .previous
19417
19418 + pushl_cfi %ss
19419 + popl_cfi %ds
19420 + pushl_cfi %ss
19421 + popl_cfi %es
19422 popl_cfi %ebx
19423 CFI_RESTORE ebx
19424 popl_cfi %esi
19425 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19426 popl_cfi %ecx # equivalent to addl $4,%esp
19427 ret
19428 CFI_ENDPROC
19429 -ENDPROC(csum_partial_copy_generic)
19430 +ENDPROC(csum_partial_copy_generic_to_user)
19431
19432 #else
19433
19434 /* Version for PentiumII/PPro */
19435
19436 #define ROUND1(x) \
19437 + nop; nop; nop; \
19438 SRC(movl x(%esi), %ebx ) ; \
19439 addl %ebx, %eax ; \
19440 - DST(movl %ebx, x(%edi) ) ;
19441 + DST(movl %ebx, %es:x(%edi)) ;
19442
19443 #define ROUND(x) \
19444 + nop; nop; nop; \
19445 SRC(movl x(%esi), %ebx ) ; \
19446 adcl %ebx, %eax ; \
19447 - DST(movl %ebx, x(%edi) ) ;
19448 + DST(movl %ebx, %es:x(%edi)) ;
19449
19450 #define ARGBASE 12
19451 -
19452 -ENTRY(csum_partial_copy_generic)
19453 +
19454 +ENTRY(csum_partial_copy_generic_to_user)
19455 CFI_STARTPROC
19456 +
19457 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19458 + pushl_cfi %gs
19459 + popl_cfi %es
19460 + jmp csum_partial_copy_generic
19461 +#endif
19462 +
19463 +ENTRY(csum_partial_copy_generic_from_user)
19464 +
19465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19466 + pushl_cfi %gs
19467 + popl_cfi %ds
19468 +#endif
19469 +
19470 +ENTRY(csum_partial_copy_generic)
19471 pushl_cfi %ebx
19472 CFI_REL_OFFSET ebx, 0
19473 pushl_cfi %edi
19474 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19475 subl %ebx, %edi
19476 lea -1(%esi),%edx
19477 andl $-32,%edx
19478 - lea 3f(%ebx,%ebx), %ebx
19479 + lea 3f(%ebx,%ebx,2), %ebx
19480 testl %esi, %esi
19481 jmp *%ebx
19482 1: addl $64,%esi
19483 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19484 jb 5f
19485 SRC( movw (%esi), %dx )
19486 leal 2(%esi), %esi
19487 -DST( movw %dx, (%edi) )
19488 +DST( movw %dx, %es:(%edi) )
19489 leal 2(%edi), %edi
19490 je 6f
19491 shll $16,%edx
19492 5:
19493 SRC( movb (%esi), %dl )
19494 -DST( movb %dl, (%edi) )
19495 +DST( movb %dl, %es:(%edi) )
19496 6: addl %edx, %eax
19497 adcl $0, %eax
19498 7:
19499 .section .fixup, "ax"
19500 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19501 - movl $-EFAULT, (%ebx)
19502 + movl $-EFAULT, %ss:(%ebx)
19503 # zero the complete destination (computing the rest is too much work)
19504 movl ARGBASE+8(%esp),%edi # dst
19505 movl ARGBASE+12(%esp),%ecx # len
19506 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19507 rep; stosb
19508 jmp 7b
19509 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19510 - movl $-EFAULT, (%ebx)
19511 + movl $-EFAULT, %ss:(%ebx)
19512 jmp 7b
19513 .previous
19514
19515 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19516 + pushl_cfi %ss
19517 + popl_cfi %ds
19518 + pushl_cfi %ss
19519 + popl_cfi %es
19520 +#endif
19521 +
19522 popl_cfi %esi
19523 CFI_RESTORE esi
19524 popl_cfi %edi
19525 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19526 CFI_RESTORE ebx
19527 ret
19528 CFI_ENDPROC
19529 -ENDPROC(csum_partial_copy_generic)
19530 +ENDPROC(csum_partial_copy_generic_to_user)
19531
19532 #undef ROUND
19533 #undef ROUND1
19534 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19535 index f2145cf..cea889d 100644
19536 --- a/arch/x86/lib/clear_page_64.S
19537 +++ b/arch/x86/lib/clear_page_64.S
19538 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19539 movl $4096/8,%ecx
19540 xorl %eax,%eax
19541 rep stosq
19542 + pax_force_retaddr
19543 ret
19544 CFI_ENDPROC
19545 ENDPROC(clear_page_c)
19546 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19547 movl $4096,%ecx
19548 xorl %eax,%eax
19549 rep stosb
19550 + pax_force_retaddr
19551 ret
19552 CFI_ENDPROC
19553 ENDPROC(clear_page_c_e)
19554 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19555 leaq 64(%rdi),%rdi
19556 jnz .Lloop
19557 nop
19558 + pax_force_retaddr
19559 ret
19560 CFI_ENDPROC
19561 .Lclear_page_end:
19562 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19563
19564 #include <asm/cpufeature.h>
19565
19566 - .section .altinstr_replacement,"ax"
19567 + .section .altinstr_replacement,"a"
19568 1: .byte 0xeb /* jmp <disp8> */
19569 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19570 2: .byte 0xeb /* jmp <disp8> */
19571 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19572 index 1e572c5..2a162cd 100644
19573 --- a/arch/x86/lib/cmpxchg16b_emu.S
19574 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19575 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19576
19577 popf
19578 mov $1, %al
19579 + pax_force_retaddr
19580 ret
19581
19582 not_same:
19583 popf
19584 xor %al,%al
19585 + pax_force_retaddr
19586 ret
19587
19588 CFI_ENDPROC
19589 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19590 index 01c805b..dccb07f 100644
19591 --- a/arch/x86/lib/copy_page_64.S
19592 +++ b/arch/x86/lib/copy_page_64.S
19593 @@ -9,6 +9,7 @@ copy_page_c:
19594 CFI_STARTPROC
19595 movl $4096/8,%ecx
19596 rep movsq
19597 + pax_force_retaddr
19598 ret
19599 CFI_ENDPROC
19600 ENDPROC(copy_page_c)
19601 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19602 movq 16 (%rsi), %rdx
19603 movq 24 (%rsi), %r8
19604 movq 32 (%rsi), %r9
19605 - movq 40 (%rsi), %r10
19606 + movq 40 (%rsi), %r13
19607 movq 48 (%rsi), %r11
19608 movq 56 (%rsi), %r12
19609
19610 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19611 movq %rdx, 16 (%rdi)
19612 movq %r8, 24 (%rdi)
19613 movq %r9, 32 (%rdi)
19614 - movq %r10, 40 (%rdi)
19615 + movq %r13, 40 (%rdi)
19616 movq %r11, 48 (%rdi)
19617 movq %r12, 56 (%rdi)
19618
19619 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19620 movq 16 (%rsi), %rdx
19621 movq 24 (%rsi), %r8
19622 movq 32 (%rsi), %r9
19623 - movq 40 (%rsi), %r10
19624 + movq 40 (%rsi), %r13
19625 movq 48 (%rsi), %r11
19626 movq 56 (%rsi), %r12
19627
19628 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19629 movq %rdx, 16 (%rdi)
19630 movq %r8, 24 (%rdi)
19631 movq %r9, 32 (%rdi)
19632 - movq %r10, 40 (%rdi)
19633 + movq %r13, 40 (%rdi)
19634 movq %r11, 48 (%rdi)
19635 movq %r12, 56 (%rdi)
19636
19637 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19638 CFI_RESTORE r13
19639 addq $3*8,%rsp
19640 CFI_ADJUST_CFA_OFFSET -3*8
19641 + pax_force_retaddr
19642 ret
19643 .Lcopy_page_end:
19644 CFI_ENDPROC
19645 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19646
19647 #include <asm/cpufeature.h>
19648
19649 - .section .altinstr_replacement,"ax"
19650 + .section .altinstr_replacement,"a"
19651 1: .byte 0xeb /* jmp <disp8> */
19652 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19653 2:
19654 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19655 index 0248402..821c786 100644
19656 --- a/arch/x86/lib/copy_user_64.S
19657 +++ b/arch/x86/lib/copy_user_64.S
19658 @@ -16,6 +16,7 @@
19659 #include <asm/thread_info.h>
19660 #include <asm/cpufeature.h>
19661 #include <asm/alternative-asm.h>
19662 +#include <asm/pgtable.h>
19663
19664 /*
19665 * By placing feature2 after feature1 in altinstructions section, we logically
19666 @@ -29,7 +30,7 @@
19667 .byte 0xe9 /* 32bit jump */
19668 .long \orig-1f /* by default jump to orig */
19669 1:
19670 - .section .altinstr_replacement,"ax"
19671 + .section .altinstr_replacement,"a"
19672 2: .byte 0xe9 /* near jump with 32bit immediate */
19673 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19674 3: .byte 0xe9 /* near jump with 32bit immediate */
19675 @@ -71,47 +72,20 @@
19676 #endif
19677 .endm
19678
19679 -/* Standard copy_to_user with segment limit checking */
19680 -ENTRY(_copy_to_user)
19681 - CFI_STARTPROC
19682 - GET_THREAD_INFO(%rax)
19683 - movq %rdi,%rcx
19684 - addq %rdx,%rcx
19685 - jc bad_to_user
19686 - cmpq TI_addr_limit(%rax),%rcx
19687 - ja bad_to_user
19688 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19689 - copy_user_generic_unrolled,copy_user_generic_string, \
19690 - copy_user_enhanced_fast_string
19691 - CFI_ENDPROC
19692 -ENDPROC(_copy_to_user)
19693 -
19694 -/* Standard copy_from_user with segment limit checking */
19695 -ENTRY(_copy_from_user)
19696 - CFI_STARTPROC
19697 - GET_THREAD_INFO(%rax)
19698 - movq %rsi,%rcx
19699 - addq %rdx,%rcx
19700 - jc bad_from_user
19701 - cmpq TI_addr_limit(%rax),%rcx
19702 - ja bad_from_user
19703 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19704 - copy_user_generic_unrolled,copy_user_generic_string, \
19705 - copy_user_enhanced_fast_string
19706 - CFI_ENDPROC
19707 -ENDPROC(_copy_from_user)
19708 -
19709 .section .fixup,"ax"
19710 /* must zero dest */
19711 ENTRY(bad_from_user)
19712 bad_from_user:
19713 CFI_STARTPROC
19714 + testl %edx,%edx
19715 + js bad_to_user
19716 movl %edx,%ecx
19717 xorl %eax,%eax
19718 rep
19719 stosb
19720 bad_to_user:
19721 movl %edx,%eax
19722 + pax_force_retaddr
19723 ret
19724 CFI_ENDPROC
19725 ENDPROC(bad_from_user)
19726 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19727 jz 17f
19728 1: movq (%rsi),%r8
19729 2: movq 1*8(%rsi),%r9
19730 -3: movq 2*8(%rsi),%r10
19731 +3: movq 2*8(%rsi),%rax
19732 4: movq 3*8(%rsi),%r11
19733 5: movq %r8,(%rdi)
19734 6: movq %r9,1*8(%rdi)
19735 -7: movq %r10,2*8(%rdi)
19736 +7: movq %rax,2*8(%rdi)
19737 8: movq %r11,3*8(%rdi)
19738 9: movq 4*8(%rsi),%r8
19739 10: movq 5*8(%rsi),%r9
19740 -11: movq 6*8(%rsi),%r10
19741 +11: movq 6*8(%rsi),%rax
19742 12: movq 7*8(%rsi),%r11
19743 13: movq %r8,4*8(%rdi)
19744 14: movq %r9,5*8(%rdi)
19745 -15: movq %r10,6*8(%rdi)
19746 +15: movq %rax,6*8(%rdi)
19747 16: movq %r11,7*8(%rdi)
19748 leaq 64(%rsi),%rsi
19749 leaq 64(%rdi),%rdi
19750 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19751 decl %ecx
19752 jnz 21b
19753 23: xor %eax,%eax
19754 + pax_force_retaddr
19755 ret
19756
19757 .section .fixup,"ax"
19758 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19759 3: rep
19760 movsb
19761 4: xorl %eax,%eax
19762 + pax_force_retaddr
19763 ret
19764
19765 .section .fixup,"ax"
19766 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19767 1: rep
19768 movsb
19769 2: xorl %eax,%eax
19770 + pax_force_retaddr
19771 ret
19772
19773 .section .fixup,"ax"
19774 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19775 index cb0c112..e3a6895 100644
19776 --- a/arch/x86/lib/copy_user_nocache_64.S
19777 +++ b/arch/x86/lib/copy_user_nocache_64.S
19778 @@ -8,12 +8,14 @@
19779
19780 #include <linux/linkage.h>
19781 #include <asm/dwarf2.h>
19782 +#include <asm/alternative-asm.h>
19783
19784 #define FIX_ALIGNMENT 1
19785
19786 #include <asm/current.h>
19787 #include <asm/asm-offsets.h>
19788 #include <asm/thread_info.h>
19789 +#include <asm/pgtable.h>
19790
19791 .macro ALIGN_DESTINATION
19792 #ifdef FIX_ALIGNMENT
19793 @@ -50,6 +52,15 @@
19794 */
19795 ENTRY(__copy_user_nocache)
19796 CFI_STARTPROC
19797 +
19798 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19799 + mov $PAX_USER_SHADOW_BASE,%rcx
19800 + cmp %rcx,%rsi
19801 + jae 1f
19802 + add %rcx,%rsi
19803 +1:
19804 +#endif
19805 +
19806 cmpl $8,%edx
19807 jb 20f /* less then 8 bytes, go to byte copy loop */
19808 ALIGN_DESTINATION
19809 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19810 jz 17f
19811 1: movq (%rsi),%r8
19812 2: movq 1*8(%rsi),%r9
19813 -3: movq 2*8(%rsi),%r10
19814 +3: movq 2*8(%rsi),%rax
19815 4: movq 3*8(%rsi),%r11
19816 5: movnti %r8,(%rdi)
19817 6: movnti %r9,1*8(%rdi)
19818 -7: movnti %r10,2*8(%rdi)
19819 +7: movnti %rax,2*8(%rdi)
19820 8: movnti %r11,3*8(%rdi)
19821 9: movq 4*8(%rsi),%r8
19822 10: movq 5*8(%rsi),%r9
19823 -11: movq 6*8(%rsi),%r10
19824 +11: movq 6*8(%rsi),%rax
19825 12: movq 7*8(%rsi),%r11
19826 13: movnti %r8,4*8(%rdi)
19827 14: movnti %r9,5*8(%rdi)
19828 -15: movnti %r10,6*8(%rdi)
19829 +15: movnti %rax,6*8(%rdi)
19830 16: movnti %r11,7*8(%rdi)
19831 leaq 64(%rsi),%rsi
19832 leaq 64(%rdi),%rdi
19833 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19834 jnz 21b
19835 23: xorl %eax,%eax
19836 sfence
19837 + pax_force_retaddr
19838 ret
19839
19840 .section .fixup,"ax"
19841 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19842 index fb903b7..c92b7f7 100644
19843 --- a/arch/x86/lib/csum-copy_64.S
19844 +++ b/arch/x86/lib/csum-copy_64.S
19845 @@ -8,6 +8,7 @@
19846 #include <linux/linkage.h>
19847 #include <asm/dwarf2.h>
19848 #include <asm/errno.h>
19849 +#include <asm/alternative-asm.h>
19850
19851 /*
19852 * Checksum copy with exception handling.
19853 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19854 CFI_RESTORE rbp
19855 addq $7*8, %rsp
19856 CFI_ADJUST_CFA_OFFSET -7*8
19857 + pax_force_retaddr 0, 1
19858 ret
19859 CFI_RESTORE_STATE
19860
19861 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19862 index 459b58a..9570bc7 100644
19863 --- a/arch/x86/lib/csum-wrappers_64.c
19864 +++ b/arch/x86/lib/csum-wrappers_64.c
19865 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19866 len -= 2;
19867 }
19868 }
19869 - isum = csum_partial_copy_generic((__force const void *)src,
19870 +
19871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19872 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19873 + src += PAX_USER_SHADOW_BASE;
19874 +#endif
19875 +
19876 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19877 dst, len, isum, errp, NULL);
19878 if (unlikely(*errp))
19879 goto out_err;
19880 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19881 }
19882
19883 *errp = 0;
19884 - return csum_partial_copy_generic(src, (void __force *)dst,
19885 +
19886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19887 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19888 + dst += PAX_USER_SHADOW_BASE;
19889 +#endif
19890 +
19891 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19892 len, isum, NULL, errp);
19893 }
19894 EXPORT_SYMBOL(csum_partial_copy_to_user);
19895 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19896 index 51f1504..ddac4c1 100644
19897 --- a/arch/x86/lib/getuser.S
19898 +++ b/arch/x86/lib/getuser.S
19899 @@ -33,15 +33,38 @@
19900 #include <asm/asm-offsets.h>
19901 #include <asm/thread_info.h>
19902 #include <asm/asm.h>
19903 +#include <asm/segment.h>
19904 +#include <asm/pgtable.h>
19905 +#include <asm/alternative-asm.h>
19906 +
19907 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19908 +#define __copyuser_seg gs;
19909 +#else
19910 +#define __copyuser_seg
19911 +#endif
19912
19913 .text
19914 ENTRY(__get_user_1)
19915 CFI_STARTPROC
19916 +
19917 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19918 GET_THREAD_INFO(%_ASM_DX)
19919 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19920 jae bad_get_user
19921 -1: movzb (%_ASM_AX),%edx
19922 +
19923 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19924 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19925 + cmp %_ASM_DX,%_ASM_AX
19926 + jae 1234f
19927 + add %_ASM_DX,%_ASM_AX
19928 +1234:
19929 +#endif
19930 +
19931 +#endif
19932 +
19933 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19934 xor %eax,%eax
19935 + pax_force_retaddr
19936 ret
19937 CFI_ENDPROC
19938 ENDPROC(__get_user_1)
19939 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19940 ENTRY(__get_user_2)
19941 CFI_STARTPROC
19942 add $1,%_ASM_AX
19943 +
19944 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19945 jc bad_get_user
19946 GET_THREAD_INFO(%_ASM_DX)
19947 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19948 jae bad_get_user
19949 -2: movzwl -1(%_ASM_AX),%edx
19950 +
19951 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19952 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19953 + cmp %_ASM_DX,%_ASM_AX
19954 + jae 1234f
19955 + add %_ASM_DX,%_ASM_AX
19956 +1234:
19957 +#endif
19958 +
19959 +#endif
19960 +
19961 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19962 xor %eax,%eax
19963 + pax_force_retaddr
19964 ret
19965 CFI_ENDPROC
19966 ENDPROC(__get_user_2)
19967 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19968 ENTRY(__get_user_4)
19969 CFI_STARTPROC
19970 add $3,%_ASM_AX
19971 +
19972 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19973 jc bad_get_user
19974 GET_THREAD_INFO(%_ASM_DX)
19975 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19976 jae bad_get_user
19977 -3: mov -3(%_ASM_AX),%edx
19978 +
19979 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19980 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19981 + cmp %_ASM_DX,%_ASM_AX
19982 + jae 1234f
19983 + add %_ASM_DX,%_ASM_AX
19984 +1234:
19985 +#endif
19986 +
19987 +#endif
19988 +
19989 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19990 xor %eax,%eax
19991 + pax_force_retaddr
19992 ret
19993 CFI_ENDPROC
19994 ENDPROC(__get_user_4)
19995 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19996 GET_THREAD_INFO(%_ASM_DX)
19997 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19998 jae bad_get_user
19999 +
20000 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20001 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
20002 + cmp %_ASM_DX,%_ASM_AX
20003 + jae 1234f
20004 + add %_ASM_DX,%_ASM_AX
20005 +1234:
20006 +#endif
20007 +
20008 4: movq -7(%_ASM_AX),%_ASM_DX
20009 xor %eax,%eax
20010 + pax_force_retaddr
20011 ret
20012 CFI_ENDPROC
20013 ENDPROC(__get_user_8)
20014 @@ -91,6 +152,7 @@ bad_get_user:
20015 CFI_STARTPROC
20016 xor %edx,%edx
20017 mov $(-EFAULT),%_ASM_AX
20018 + pax_force_retaddr
20019 ret
20020 CFI_ENDPROC
20021 END(bad_get_user)
20022 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
20023 index 374562e..a75830b 100644
20024 --- a/arch/x86/lib/insn.c
20025 +++ b/arch/x86/lib/insn.c
20026 @@ -21,6 +21,11 @@
20027 #include <linux/string.h>
20028 #include <asm/inat.h>
20029 #include <asm/insn.h>
20030 +#ifdef __KERNEL__
20031 +#include <asm/pgtable_types.h>
20032 +#else
20033 +#define ktla_ktva(addr) addr
20034 +#endif
20035
20036 /* Verify next sizeof(t) bytes can be on the same instruction */
20037 #define validate_next(t, insn, n) \
20038 @@ -49,8 +54,8 @@
20039 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
20040 {
20041 memset(insn, 0, sizeof(*insn));
20042 - insn->kaddr = kaddr;
20043 - insn->next_byte = kaddr;
20044 + insn->kaddr = ktla_ktva(kaddr);
20045 + insn->next_byte = ktla_ktva(kaddr);
20046 insn->x86_64 = x86_64 ? 1 : 0;
20047 insn->opnd_bytes = 4;
20048 if (x86_64)
20049 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
20050 index 05a95e7..326f2fa 100644
20051 --- a/arch/x86/lib/iomap_copy_64.S
20052 +++ b/arch/x86/lib/iomap_copy_64.S
20053 @@ -17,6 +17,7 @@
20054
20055 #include <linux/linkage.h>
20056 #include <asm/dwarf2.h>
20057 +#include <asm/alternative-asm.h>
20058
20059 /*
20060 * override generic version in lib/iomap_copy.c
20061 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
20062 CFI_STARTPROC
20063 movl %edx,%ecx
20064 rep movsd
20065 + pax_force_retaddr
20066 ret
20067 CFI_ENDPROC
20068 ENDPROC(__iowrite32_copy)
20069 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
20070 index efbf2a0..8893637 100644
20071 --- a/arch/x86/lib/memcpy_64.S
20072 +++ b/arch/x86/lib/memcpy_64.S
20073 @@ -34,6 +34,7 @@
20074 rep movsq
20075 movl %edx, %ecx
20076 rep movsb
20077 + pax_force_retaddr
20078 ret
20079 .Lmemcpy_e:
20080 .previous
20081 @@ -51,6 +52,7 @@
20082
20083 movl %edx, %ecx
20084 rep movsb
20085 + pax_force_retaddr
20086 ret
20087 .Lmemcpy_e_e:
20088 .previous
20089 @@ -81,13 +83,13 @@ ENTRY(memcpy)
20090 */
20091 movq 0*8(%rsi), %r8
20092 movq 1*8(%rsi), %r9
20093 - movq 2*8(%rsi), %r10
20094 + movq 2*8(%rsi), %rcx
20095 movq 3*8(%rsi), %r11
20096 leaq 4*8(%rsi), %rsi
20097
20098 movq %r8, 0*8(%rdi)
20099 movq %r9, 1*8(%rdi)
20100 - movq %r10, 2*8(%rdi)
20101 + movq %rcx, 2*8(%rdi)
20102 movq %r11, 3*8(%rdi)
20103 leaq 4*8(%rdi), %rdi
20104 jae .Lcopy_forward_loop
20105 @@ -110,12 +112,12 @@ ENTRY(memcpy)
20106 subq $0x20, %rdx
20107 movq -1*8(%rsi), %r8
20108 movq -2*8(%rsi), %r9
20109 - movq -3*8(%rsi), %r10
20110 + movq -3*8(%rsi), %rcx
20111 movq -4*8(%rsi), %r11
20112 leaq -4*8(%rsi), %rsi
20113 movq %r8, -1*8(%rdi)
20114 movq %r9, -2*8(%rdi)
20115 - movq %r10, -3*8(%rdi)
20116 + movq %rcx, -3*8(%rdi)
20117 movq %r11, -4*8(%rdi)
20118 leaq -4*8(%rdi), %rdi
20119 jae .Lcopy_backward_loop
20120 @@ -135,12 +137,13 @@ ENTRY(memcpy)
20121 */
20122 movq 0*8(%rsi), %r8
20123 movq 1*8(%rsi), %r9
20124 - movq -2*8(%rsi, %rdx), %r10
20125 + movq -2*8(%rsi, %rdx), %rcx
20126 movq -1*8(%rsi, %rdx), %r11
20127 movq %r8, 0*8(%rdi)
20128 movq %r9, 1*8(%rdi)
20129 - movq %r10, -2*8(%rdi, %rdx)
20130 + movq %rcx, -2*8(%rdi, %rdx)
20131 movq %r11, -1*8(%rdi, %rdx)
20132 + pax_force_retaddr
20133 retq
20134 .p2align 4
20135 .Lless_16bytes:
20136 @@ -153,6 +156,7 @@ ENTRY(memcpy)
20137 movq -1*8(%rsi, %rdx), %r9
20138 movq %r8, 0*8(%rdi)
20139 movq %r9, -1*8(%rdi, %rdx)
20140 + pax_force_retaddr
20141 retq
20142 .p2align 4
20143 .Lless_8bytes:
20144 @@ -166,6 +170,7 @@ ENTRY(memcpy)
20145 movl -4(%rsi, %rdx), %r8d
20146 movl %ecx, (%rdi)
20147 movl %r8d, -4(%rdi, %rdx)
20148 + pax_force_retaddr
20149 retq
20150 .p2align 4
20151 .Lless_3bytes:
20152 @@ -183,6 +188,7 @@ ENTRY(memcpy)
20153 jnz .Lloop_1
20154
20155 .Lend:
20156 + pax_force_retaddr
20157 retq
20158 CFI_ENDPROC
20159 ENDPROC(memcpy)
20160 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
20161 index ee16461..c39c199 100644
20162 --- a/arch/x86/lib/memmove_64.S
20163 +++ b/arch/x86/lib/memmove_64.S
20164 @@ -61,13 +61,13 @@ ENTRY(memmove)
20165 5:
20166 sub $0x20, %rdx
20167 movq 0*8(%rsi), %r11
20168 - movq 1*8(%rsi), %r10
20169 + movq 1*8(%rsi), %rcx
20170 movq 2*8(%rsi), %r9
20171 movq 3*8(%rsi), %r8
20172 leaq 4*8(%rsi), %rsi
20173
20174 movq %r11, 0*8(%rdi)
20175 - movq %r10, 1*8(%rdi)
20176 + movq %rcx, 1*8(%rdi)
20177 movq %r9, 2*8(%rdi)
20178 movq %r8, 3*8(%rdi)
20179 leaq 4*8(%rdi), %rdi
20180 @@ -81,10 +81,10 @@ ENTRY(memmove)
20181 4:
20182 movq %rdx, %rcx
20183 movq -8(%rsi, %rdx), %r11
20184 - lea -8(%rdi, %rdx), %r10
20185 + lea -8(%rdi, %rdx), %r9
20186 shrq $3, %rcx
20187 rep movsq
20188 - movq %r11, (%r10)
20189 + movq %r11, (%r9)
20190 jmp 13f
20191 .Lmemmove_end_forward:
20192
20193 @@ -95,14 +95,14 @@ ENTRY(memmove)
20194 7:
20195 movq %rdx, %rcx
20196 movq (%rsi), %r11
20197 - movq %rdi, %r10
20198 + movq %rdi, %r9
20199 leaq -8(%rsi, %rdx), %rsi
20200 leaq -8(%rdi, %rdx), %rdi
20201 shrq $3, %rcx
20202 std
20203 rep movsq
20204 cld
20205 - movq %r11, (%r10)
20206 + movq %r11, (%r9)
20207 jmp 13f
20208
20209 /*
20210 @@ -127,13 +127,13 @@ ENTRY(memmove)
20211 8:
20212 subq $0x20, %rdx
20213 movq -1*8(%rsi), %r11
20214 - movq -2*8(%rsi), %r10
20215 + movq -2*8(%rsi), %rcx
20216 movq -3*8(%rsi), %r9
20217 movq -4*8(%rsi), %r8
20218 leaq -4*8(%rsi), %rsi
20219
20220 movq %r11, -1*8(%rdi)
20221 - movq %r10, -2*8(%rdi)
20222 + movq %rcx, -2*8(%rdi)
20223 movq %r9, -3*8(%rdi)
20224 movq %r8, -4*8(%rdi)
20225 leaq -4*8(%rdi), %rdi
20226 @@ -151,11 +151,11 @@ ENTRY(memmove)
20227 * Move data from 16 bytes to 31 bytes.
20228 */
20229 movq 0*8(%rsi), %r11
20230 - movq 1*8(%rsi), %r10
20231 + movq 1*8(%rsi), %rcx
20232 movq -2*8(%rsi, %rdx), %r9
20233 movq -1*8(%rsi, %rdx), %r8
20234 movq %r11, 0*8(%rdi)
20235 - movq %r10, 1*8(%rdi)
20236 + movq %rcx, 1*8(%rdi)
20237 movq %r9, -2*8(%rdi, %rdx)
20238 movq %r8, -1*8(%rdi, %rdx)
20239 jmp 13f
20240 @@ -167,9 +167,9 @@ ENTRY(memmove)
20241 * Move data from 8 bytes to 15 bytes.
20242 */
20243 movq 0*8(%rsi), %r11
20244 - movq -1*8(%rsi, %rdx), %r10
20245 + movq -1*8(%rsi, %rdx), %r9
20246 movq %r11, 0*8(%rdi)
20247 - movq %r10, -1*8(%rdi, %rdx)
20248 + movq %r9, -1*8(%rdi, %rdx)
20249 jmp 13f
20250 10:
20251 cmpq $4, %rdx
20252 @@ -178,9 +178,9 @@ ENTRY(memmove)
20253 * Move data from 4 bytes to 7 bytes.
20254 */
20255 movl (%rsi), %r11d
20256 - movl -4(%rsi, %rdx), %r10d
20257 + movl -4(%rsi, %rdx), %r9d
20258 movl %r11d, (%rdi)
20259 - movl %r10d, -4(%rdi, %rdx)
20260 + movl %r9d, -4(%rdi, %rdx)
20261 jmp 13f
20262 11:
20263 cmp $2, %rdx
20264 @@ -189,9 +189,9 @@ ENTRY(memmove)
20265 * Move data from 2 bytes to 3 bytes.
20266 */
20267 movw (%rsi), %r11w
20268 - movw -2(%rsi, %rdx), %r10w
20269 + movw -2(%rsi, %rdx), %r9w
20270 movw %r11w, (%rdi)
20271 - movw %r10w, -2(%rdi, %rdx)
20272 + movw %r9w, -2(%rdi, %rdx)
20273 jmp 13f
20274 12:
20275 cmp $1, %rdx
20276 @@ -202,6 +202,7 @@ ENTRY(memmove)
20277 movb (%rsi), %r11b
20278 movb %r11b, (%rdi)
20279 13:
20280 + pax_force_retaddr
20281 retq
20282 CFI_ENDPROC
20283
20284 @@ -210,6 +211,7 @@ ENTRY(memmove)
20285 /* Forward moving data. */
20286 movq %rdx, %rcx
20287 rep movsb
20288 + pax_force_retaddr
20289 retq
20290 .Lmemmove_end_forward_efs:
20291 .previous
20292 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20293 index 79bd454..dff325a 100644
20294 --- a/arch/x86/lib/memset_64.S
20295 +++ b/arch/x86/lib/memset_64.S
20296 @@ -31,6 +31,7 @@
20297 movl %r8d,%ecx
20298 rep stosb
20299 movq %r9,%rax
20300 + pax_force_retaddr
20301 ret
20302 .Lmemset_e:
20303 .previous
20304 @@ -53,6 +54,7 @@
20305 movl %edx,%ecx
20306 rep stosb
20307 movq %r9,%rax
20308 + pax_force_retaddr
20309 ret
20310 .Lmemset_e_e:
20311 .previous
20312 @@ -60,13 +62,13 @@
20313 ENTRY(memset)
20314 ENTRY(__memset)
20315 CFI_STARTPROC
20316 - movq %rdi,%r10
20317 movq %rdx,%r11
20318
20319 /* expand byte value */
20320 movzbl %sil,%ecx
20321 movabs $0x0101010101010101,%rax
20322 mul %rcx /* with rax, clobbers rdx */
20323 + movq %rdi,%rdx
20324
20325 /* align dst */
20326 movl %edi,%r9d
20327 @@ -120,7 +122,8 @@ ENTRY(__memset)
20328 jnz .Lloop_1
20329
20330 .Lende:
20331 - movq %r10,%rax
20332 + movq %rdx,%rax
20333 + pax_force_retaddr
20334 ret
20335
20336 CFI_RESTORE_STATE
20337 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20338 index c9f2d9b..e7fd2c0 100644
20339 --- a/arch/x86/lib/mmx_32.c
20340 +++ b/arch/x86/lib/mmx_32.c
20341 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20342 {
20343 void *p;
20344 int i;
20345 + unsigned long cr0;
20346
20347 if (unlikely(in_interrupt()))
20348 return __memcpy(to, from, len);
20349 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20350 kernel_fpu_begin();
20351
20352 __asm__ __volatile__ (
20353 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20354 - " prefetch 64(%0)\n"
20355 - " prefetch 128(%0)\n"
20356 - " prefetch 192(%0)\n"
20357 - " prefetch 256(%0)\n"
20358 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20359 + " prefetch 64(%1)\n"
20360 + " prefetch 128(%1)\n"
20361 + " prefetch 192(%1)\n"
20362 + " prefetch 256(%1)\n"
20363 "2: \n"
20364 ".section .fixup, \"ax\"\n"
20365 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20366 + "3: \n"
20367 +
20368 +#ifdef CONFIG_PAX_KERNEXEC
20369 + " movl %%cr0, %0\n"
20370 + " movl %0, %%eax\n"
20371 + " andl $0xFFFEFFFF, %%eax\n"
20372 + " movl %%eax, %%cr0\n"
20373 +#endif
20374 +
20375 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20376 +
20377 +#ifdef CONFIG_PAX_KERNEXEC
20378 + " movl %0, %%cr0\n"
20379 +#endif
20380 +
20381 " jmp 2b\n"
20382 ".previous\n"
20383 _ASM_EXTABLE(1b, 3b)
20384 - : : "r" (from));
20385 + : "=&r" (cr0) : "r" (from) : "ax");
20386
20387 for ( ; i > 5; i--) {
20388 __asm__ __volatile__ (
20389 - "1: prefetch 320(%0)\n"
20390 - "2: movq (%0), %%mm0\n"
20391 - " movq 8(%0), %%mm1\n"
20392 - " movq 16(%0), %%mm2\n"
20393 - " movq 24(%0), %%mm3\n"
20394 - " movq %%mm0, (%1)\n"
20395 - " movq %%mm1, 8(%1)\n"
20396 - " movq %%mm2, 16(%1)\n"
20397 - " movq %%mm3, 24(%1)\n"
20398 - " movq 32(%0), %%mm0\n"
20399 - " movq 40(%0), %%mm1\n"
20400 - " movq 48(%0), %%mm2\n"
20401 - " movq 56(%0), %%mm3\n"
20402 - " movq %%mm0, 32(%1)\n"
20403 - " movq %%mm1, 40(%1)\n"
20404 - " movq %%mm2, 48(%1)\n"
20405 - " movq %%mm3, 56(%1)\n"
20406 + "1: prefetch 320(%1)\n"
20407 + "2: movq (%1), %%mm0\n"
20408 + " movq 8(%1), %%mm1\n"
20409 + " movq 16(%1), %%mm2\n"
20410 + " movq 24(%1), %%mm3\n"
20411 + " movq %%mm0, (%2)\n"
20412 + " movq %%mm1, 8(%2)\n"
20413 + " movq %%mm2, 16(%2)\n"
20414 + " movq %%mm3, 24(%2)\n"
20415 + " movq 32(%1), %%mm0\n"
20416 + " movq 40(%1), %%mm1\n"
20417 + " movq 48(%1), %%mm2\n"
20418 + " movq 56(%1), %%mm3\n"
20419 + " movq %%mm0, 32(%2)\n"
20420 + " movq %%mm1, 40(%2)\n"
20421 + " movq %%mm2, 48(%2)\n"
20422 + " movq %%mm3, 56(%2)\n"
20423 ".section .fixup, \"ax\"\n"
20424 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20425 + "3:\n"
20426 +
20427 +#ifdef CONFIG_PAX_KERNEXEC
20428 + " movl %%cr0, %0\n"
20429 + " movl %0, %%eax\n"
20430 + " andl $0xFFFEFFFF, %%eax\n"
20431 + " movl %%eax, %%cr0\n"
20432 +#endif
20433 +
20434 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20435 +
20436 +#ifdef CONFIG_PAX_KERNEXEC
20437 + " movl %0, %%cr0\n"
20438 +#endif
20439 +
20440 " jmp 2b\n"
20441 ".previous\n"
20442 _ASM_EXTABLE(1b, 3b)
20443 - : : "r" (from), "r" (to) : "memory");
20444 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20445
20446 from += 64;
20447 to += 64;
20448 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20449 static void fast_copy_page(void *to, void *from)
20450 {
20451 int i;
20452 + unsigned long cr0;
20453
20454 kernel_fpu_begin();
20455
20456 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20457 * but that is for later. -AV
20458 */
20459 __asm__ __volatile__(
20460 - "1: prefetch (%0)\n"
20461 - " prefetch 64(%0)\n"
20462 - " prefetch 128(%0)\n"
20463 - " prefetch 192(%0)\n"
20464 - " prefetch 256(%0)\n"
20465 + "1: prefetch (%1)\n"
20466 + " prefetch 64(%1)\n"
20467 + " prefetch 128(%1)\n"
20468 + " prefetch 192(%1)\n"
20469 + " prefetch 256(%1)\n"
20470 "2: \n"
20471 ".section .fixup, \"ax\"\n"
20472 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20473 + "3: \n"
20474 +
20475 +#ifdef CONFIG_PAX_KERNEXEC
20476 + " movl %%cr0, %0\n"
20477 + " movl %0, %%eax\n"
20478 + " andl $0xFFFEFFFF, %%eax\n"
20479 + " movl %%eax, %%cr0\n"
20480 +#endif
20481 +
20482 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20483 +
20484 +#ifdef CONFIG_PAX_KERNEXEC
20485 + " movl %0, %%cr0\n"
20486 +#endif
20487 +
20488 " jmp 2b\n"
20489 ".previous\n"
20490 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20491 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20492
20493 for (i = 0; i < (4096-320)/64; i++) {
20494 __asm__ __volatile__ (
20495 - "1: prefetch 320(%0)\n"
20496 - "2: movq (%0), %%mm0\n"
20497 - " movntq %%mm0, (%1)\n"
20498 - " movq 8(%0), %%mm1\n"
20499 - " movntq %%mm1, 8(%1)\n"
20500 - " movq 16(%0), %%mm2\n"
20501 - " movntq %%mm2, 16(%1)\n"
20502 - " movq 24(%0), %%mm3\n"
20503 - " movntq %%mm3, 24(%1)\n"
20504 - " movq 32(%0), %%mm4\n"
20505 - " movntq %%mm4, 32(%1)\n"
20506 - " movq 40(%0), %%mm5\n"
20507 - " movntq %%mm5, 40(%1)\n"
20508 - " movq 48(%0), %%mm6\n"
20509 - " movntq %%mm6, 48(%1)\n"
20510 - " movq 56(%0), %%mm7\n"
20511 - " movntq %%mm7, 56(%1)\n"
20512 + "1: prefetch 320(%1)\n"
20513 + "2: movq (%1), %%mm0\n"
20514 + " movntq %%mm0, (%2)\n"
20515 + " movq 8(%1), %%mm1\n"
20516 + " movntq %%mm1, 8(%2)\n"
20517 + " movq 16(%1), %%mm2\n"
20518 + " movntq %%mm2, 16(%2)\n"
20519 + " movq 24(%1), %%mm3\n"
20520 + " movntq %%mm3, 24(%2)\n"
20521 + " movq 32(%1), %%mm4\n"
20522 + " movntq %%mm4, 32(%2)\n"
20523 + " movq 40(%1), %%mm5\n"
20524 + " movntq %%mm5, 40(%2)\n"
20525 + " movq 48(%1), %%mm6\n"
20526 + " movntq %%mm6, 48(%2)\n"
20527 + " movq 56(%1), %%mm7\n"
20528 + " movntq %%mm7, 56(%2)\n"
20529 ".section .fixup, \"ax\"\n"
20530 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20531 + "3:\n"
20532 +
20533 +#ifdef CONFIG_PAX_KERNEXEC
20534 + " movl %%cr0, %0\n"
20535 + " movl %0, %%eax\n"
20536 + " andl $0xFFFEFFFF, %%eax\n"
20537 + " movl %%eax, %%cr0\n"
20538 +#endif
20539 +
20540 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20541 +
20542 +#ifdef CONFIG_PAX_KERNEXEC
20543 + " movl %0, %%cr0\n"
20544 +#endif
20545 +
20546 " jmp 2b\n"
20547 ".previous\n"
20548 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20549 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20550
20551 from += 64;
20552 to += 64;
20553 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20554 static void fast_copy_page(void *to, void *from)
20555 {
20556 int i;
20557 + unsigned long cr0;
20558
20559 kernel_fpu_begin();
20560
20561 __asm__ __volatile__ (
20562 - "1: prefetch (%0)\n"
20563 - " prefetch 64(%0)\n"
20564 - " prefetch 128(%0)\n"
20565 - " prefetch 192(%0)\n"
20566 - " prefetch 256(%0)\n"
20567 + "1: prefetch (%1)\n"
20568 + " prefetch 64(%1)\n"
20569 + " prefetch 128(%1)\n"
20570 + " prefetch 192(%1)\n"
20571 + " prefetch 256(%1)\n"
20572 "2: \n"
20573 ".section .fixup, \"ax\"\n"
20574 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20575 + "3: \n"
20576 +
20577 +#ifdef CONFIG_PAX_KERNEXEC
20578 + " movl %%cr0, %0\n"
20579 + " movl %0, %%eax\n"
20580 + " andl $0xFFFEFFFF, %%eax\n"
20581 + " movl %%eax, %%cr0\n"
20582 +#endif
20583 +
20584 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20585 +
20586 +#ifdef CONFIG_PAX_KERNEXEC
20587 + " movl %0, %%cr0\n"
20588 +#endif
20589 +
20590 " jmp 2b\n"
20591 ".previous\n"
20592 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20593 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20594
20595 for (i = 0; i < 4096/64; i++) {
20596 __asm__ __volatile__ (
20597 - "1: prefetch 320(%0)\n"
20598 - "2: movq (%0), %%mm0\n"
20599 - " movq 8(%0), %%mm1\n"
20600 - " movq 16(%0), %%mm2\n"
20601 - " movq 24(%0), %%mm3\n"
20602 - " movq %%mm0, (%1)\n"
20603 - " movq %%mm1, 8(%1)\n"
20604 - " movq %%mm2, 16(%1)\n"
20605 - " movq %%mm3, 24(%1)\n"
20606 - " movq 32(%0), %%mm0\n"
20607 - " movq 40(%0), %%mm1\n"
20608 - " movq 48(%0), %%mm2\n"
20609 - " movq 56(%0), %%mm3\n"
20610 - " movq %%mm0, 32(%1)\n"
20611 - " movq %%mm1, 40(%1)\n"
20612 - " movq %%mm2, 48(%1)\n"
20613 - " movq %%mm3, 56(%1)\n"
20614 + "1: prefetch 320(%1)\n"
20615 + "2: movq (%1), %%mm0\n"
20616 + " movq 8(%1), %%mm1\n"
20617 + " movq 16(%1), %%mm2\n"
20618 + " movq 24(%1), %%mm3\n"
20619 + " movq %%mm0, (%2)\n"
20620 + " movq %%mm1, 8(%2)\n"
20621 + " movq %%mm2, 16(%2)\n"
20622 + " movq %%mm3, 24(%2)\n"
20623 + " movq 32(%1), %%mm0\n"
20624 + " movq 40(%1), %%mm1\n"
20625 + " movq 48(%1), %%mm2\n"
20626 + " movq 56(%1), %%mm3\n"
20627 + " movq %%mm0, 32(%2)\n"
20628 + " movq %%mm1, 40(%2)\n"
20629 + " movq %%mm2, 48(%2)\n"
20630 + " movq %%mm3, 56(%2)\n"
20631 ".section .fixup, \"ax\"\n"
20632 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20633 + "3:\n"
20634 +
20635 +#ifdef CONFIG_PAX_KERNEXEC
20636 + " movl %%cr0, %0\n"
20637 + " movl %0, %%eax\n"
20638 + " andl $0xFFFEFFFF, %%eax\n"
20639 + " movl %%eax, %%cr0\n"
20640 +#endif
20641 +
20642 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20643 +
20644 +#ifdef CONFIG_PAX_KERNEXEC
20645 + " movl %0, %%cr0\n"
20646 +#endif
20647 +
20648 " jmp 2b\n"
20649 ".previous\n"
20650 _ASM_EXTABLE(1b, 3b)
20651 - : : "r" (from), "r" (to) : "memory");
20652 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20653
20654 from += 64;
20655 to += 64;
20656 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20657 index 69fa106..adda88b 100644
20658 --- a/arch/x86/lib/msr-reg.S
20659 +++ b/arch/x86/lib/msr-reg.S
20660 @@ -3,6 +3,7 @@
20661 #include <asm/dwarf2.h>
20662 #include <asm/asm.h>
20663 #include <asm/msr.h>
20664 +#include <asm/alternative-asm.h>
20665
20666 #ifdef CONFIG_X86_64
20667 /*
20668 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20669 CFI_STARTPROC
20670 pushq_cfi %rbx
20671 pushq_cfi %rbp
20672 - movq %rdi, %r10 /* Save pointer */
20673 + movq %rdi, %r9 /* Save pointer */
20674 xorl %r11d, %r11d /* Return value */
20675 movl (%rdi), %eax
20676 movl 4(%rdi), %ecx
20677 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20678 movl 28(%rdi), %edi
20679 CFI_REMEMBER_STATE
20680 1: \op
20681 -2: movl %eax, (%r10)
20682 +2: movl %eax, (%r9)
20683 movl %r11d, %eax /* Return value */
20684 - movl %ecx, 4(%r10)
20685 - movl %edx, 8(%r10)
20686 - movl %ebx, 12(%r10)
20687 - movl %ebp, 20(%r10)
20688 - movl %esi, 24(%r10)
20689 - movl %edi, 28(%r10)
20690 + movl %ecx, 4(%r9)
20691 + movl %edx, 8(%r9)
20692 + movl %ebx, 12(%r9)
20693 + movl %ebp, 20(%r9)
20694 + movl %esi, 24(%r9)
20695 + movl %edi, 28(%r9)
20696 popq_cfi %rbp
20697 popq_cfi %rbx
20698 + pax_force_retaddr
20699 ret
20700 3:
20701 CFI_RESTORE_STATE
20702 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20703 index 36b0d15..d381858 100644
20704 --- a/arch/x86/lib/putuser.S
20705 +++ b/arch/x86/lib/putuser.S
20706 @@ -15,7 +15,9 @@
20707 #include <asm/thread_info.h>
20708 #include <asm/errno.h>
20709 #include <asm/asm.h>
20710 -
20711 +#include <asm/segment.h>
20712 +#include <asm/pgtable.h>
20713 +#include <asm/alternative-asm.h>
20714
20715 /*
20716 * __put_user_X
20717 @@ -29,52 +31,119 @@
20718 * as they get called from within inline assembly.
20719 */
20720
20721 -#define ENTER CFI_STARTPROC ; \
20722 - GET_THREAD_INFO(%_ASM_BX)
20723 -#define EXIT ret ; \
20724 +#define ENTER CFI_STARTPROC
20725 +#define EXIT pax_force_retaddr; ret ; \
20726 CFI_ENDPROC
20727
20728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20729 +#define _DEST %_ASM_CX,%_ASM_BX
20730 +#else
20731 +#define _DEST %_ASM_CX
20732 +#endif
20733 +
20734 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20735 +#define __copyuser_seg gs;
20736 +#else
20737 +#define __copyuser_seg
20738 +#endif
20739 +
20740 .text
20741 ENTRY(__put_user_1)
20742 ENTER
20743 +
20744 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20745 + GET_THREAD_INFO(%_ASM_BX)
20746 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20747 jae bad_put_user
20748 -1: movb %al,(%_ASM_CX)
20749 +
20750 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20751 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20752 + cmp %_ASM_BX,%_ASM_CX
20753 + jb 1234f
20754 + xor %ebx,%ebx
20755 +1234:
20756 +#endif
20757 +
20758 +#endif
20759 +
20760 +1: __copyuser_seg movb %al,(_DEST)
20761 xor %eax,%eax
20762 EXIT
20763 ENDPROC(__put_user_1)
20764
20765 ENTRY(__put_user_2)
20766 ENTER
20767 +
20768 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20769 + GET_THREAD_INFO(%_ASM_BX)
20770 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20771 sub $1,%_ASM_BX
20772 cmp %_ASM_BX,%_ASM_CX
20773 jae bad_put_user
20774 -2: movw %ax,(%_ASM_CX)
20775 +
20776 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20777 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20778 + cmp %_ASM_BX,%_ASM_CX
20779 + jb 1234f
20780 + xor %ebx,%ebx
20781 +1234:
20782 +#endif
20783 +
20784 +#endif
20785 +
20786 +2: __copyuser_seg movw %ax,(_DEST)
20787 xor %eax,%eax
20788 EXIT
20789 ENDPROC(__put_user_2)
20790
20791 ENTRY(__put_user_4)
20792 ENTER
20793 +
20794 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20795 + GET_THREAD_INFO(%_ASM_BX)
20796 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20797 sub $3,%_ASM_BX
20798 cmp %_ASM_BX,%_ASM_CX
20799 jae bad_put_user
20800 -3: movl %eax,(%_ASM_CX)
20801 +
20802 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20803 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20804 + cmp %_ASM_BX,%_ASM_CX
20805 + jb 1234f
20806 + xor %ebx,%ebx
20807 +1234:
20808 +#endif
20809 +
20810 +#endif
20811 +
20812 +3: __copyuser_seg movl %eax,(_DEST)
20813 xor %eax,%eax
20814 EXIT
20815 ENDPROC(__put_user_4)
20816
20817 ENTRY(__put_user_8)
20818 ENTER
20819 +
20820 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20821 + GET_THREAD_INFO(%_ASM_BX)
20822 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20823 sub $7,%_ASM_BX
20824 cmp %_ASM_BX,%_ASM_CX
20825 jae bad_put_user
20826 -4: mov %_ASM_AX,(%_ASM_CX)
20827 +
20828 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20829 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20830 + cmp %_ASM_BX,%_ASM_CX
20831 + jb 1234f
20832 + xor %ebx,%ebx
20833 +1234:
20834 +#endif
20835 +
20836 +#endif
20837 +
20838 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20839 #ifdef CONFIG_X86_32
20840 -5: movl %edx,4(%_ASM_CX)
20841 +5: __copyuser_seg movl %edx,4(_DEST)
20842 #endif
20843 xor %eax,%eax
20844 EXIT
20845 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20846 index 1cad221..de671ee 100644
20847 --- a/arch/x86/lib/rwlock.S
20848 +++ b/arch/x86/lib/rwlock.S
20849 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20850 FRAME
20851 0: LOCK_PREFIX
20852 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20853 +
20854 +#ifdef CONFIG_PAX_REFCOUNT
20855 + jno 1234f
20856 + LOCK_PREFIX
20857 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20858 + int $4
20859 +1234:
20860 + _ASM_EXTABLE(1234b, 1234b)
20861 +#endif
20862 +
20863 1: rep; nop
20864 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20865 jne 1b
20866 LOCK_PREFIX
20867 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20868 +
20869 +#ifdef CONFIG_PAX_REFCOUNT
20870 + jno 1234f
20871 + LOCK_PREFIX
20872 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20873 + int $4
20874 +1234:
20875 + _ASM_EXTABLE(1234b, 1234b)
20876 +#endif
20877 +
20878 jnz 0b
20879 ENDFRAME
20880 + pax_force_retaddr
20881 ret
20882 CFI_ENDPROC
20883 END(__write_lock_failed)
20884 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20885 FRAME
20886 0: LOCK_PREFIX
20887 READ_LOCK_SIZE(inc) (%__lock_ptr)
20888 +
20889 +#ifdef CONFIG_PAX_REFCOUNT
20890 + jno 1234f
20891 + LOCK_PREFIX
20892 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20893 + int $4
20894 +1234:
20895 + _ASM_EXTABLE(1234b, 1234b)
20896 +#endif
20897 +
20898 1: rep; nop
20899 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20900 js 1b
20901 LOCK_PREFIX
20902 READ_LOCK_SIZE(dec) (%__lock_ptr)
20903 +
20904 +#ifdef CONFIG_PAX_REFCOUNT
20905 + jno 1234f
20906 + LOCK_PREFIX
20907 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20908 + int $4
20909 +1234:
20910 + _ASM_EXTABLE(1234b, 1234b)
20911 +#endif
20912 +
20913 js 0b
20914 ENDFRAME
20915 + pax_force_retaddr
20916 ret
20917 CFI_ENDPROC
20918 END(__read_lock_failed)
20919 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20920 index 5dff5f0..cadebf4 100644
20921 --- a/arch/x86/lib/rwsem.S
20922 +++ b/arch/x86/lib/rwsem.S
20923 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20924 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20925 CFI_RESTORE __ASM_REG(dx)
20926 restore_common_regs
20927 + pax_force_retaddr
20928 ret
20929 CFI_ENDPROC
20930 ENDPROC(call_rwsem_down_read_failed)
20931 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20932 movq %rax,%rdi
20933 call rwsem_down_write_failed
20934 restore_common_regs
20935 + pax_force_retaddr
20936 ret
20937 CFI_ENDPROC
20938 ENDPROC(call_rwsem_down_write_failed)
20939 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20940 movq %rax,%rdi
20941 call rwsem_wake
20942 restore_common_regs
20943 -1: ret
20944 +1: pax_force_retaddr
20945 + ret
20946 CFI_ENDPROC
20947 ENDPROC(call_rwsem_wake)
20948
20949 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20950 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20951 CFI_RESTORE __ASM_REG(dx)
20952 restore_common_regs
20953 + pax_force_retaddr
20954 ret
20955 CFI_ENDPROC
20956 ENDPROC(call_rwsem_downgrade_wake)
20957 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20958 index a63efd6..ccecad8 100644
20959 --- a/arch/x86/lib/thunk_64.S
20960 +++ b/arch/x86/lib/thunk_64.S
20961 @@ -8,6 +8,7 @@
20962 #include <linux/linkage.h>
20963 #include <asm/dwarf2.h>
20964 #include <asm/calling.h>
20965 +#include <asm/alternative-asm.h>
20966
20967 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20968 .macro THUNK name, func, put_ret_addr_in_rdi=0
20969 @@ -41,5 +42,6 @@
20970 SAVE_ARGS
20971 restore:
20972 RESTORE_ARGS
20973 + pax_force_retaddr
20974 ret
20975 CFI_ENDPROC
20976 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20977 index e218d5d..35679b4 100644
20978 --- a/arch/x86/lib/usercopy_32.c
20979 +++ b/arch/x86/lib/usercopy_32.c
20980 @@ -43,7 +43,7 @@ do { \
20981 __asm__ __volatile__( \
20982 " testl %1,%1\n" \
20983 " jz 2f\n" \
20984 - "0: lodsb\n" \
20985 + "0: "__copyuser_seg"lodsb\n" \
20986 " stosb\n" \
20987 " testb %%al,%%al\n" \
20988 " jz 1f\n" \
20989 @@ -128,10 +128,12 @@ do { \
20990 int __d0; \
20991 might_fault(); \
20992 __asm__ __volatile__( \
20993 + __COPYUSER_SET_ES \
20994 "0: rep; stosl\n" \
20995 " movl %2,%0\n" \
20996 "1: rep; stosb\n" \
20997 "2:\n" \
20998 + __COPYUSER_RESTORE_ES \
20999 ".section .fixup,\"ax\"\n" \
21000 "3: lea 0(%2,%0,4),%0\n" \
21001 " jmp 2b\n" \
21002 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
21003 might_fault();
21004
21005 __asm__ __volatile__(
21006 + __COPYUSER_SET_ES
21007 " testl %0, %0\n"
21008 " jz 3f\n"
21009 " andl %0,%%ecx\n"
21010 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
21011 " subl %%ecx,%0\n"
21012 " addl %0,%%eax\n"
21013 "1:\n"
21014 + __COPYUSER_RESTORE_ES
21015 ".section .fixup,\"ax\"\n"
21016 "2: xorl %%eax,%%eax\n"
21017 " jmp 1b\n"
21018 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
21019
21020 #ifdef CONFIG_X86_INTEL_USERCOPY
21021 static unsigned long
21022 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
21023 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
21024 {
21025 int d0, d1;
21026 __asm__ __volatile__(
21027 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21028 " .align 2,0x90\n"
21029 "3: movl 0(%4), %%eax\n"
21030 "4: movl 4(%4), %%edx\n"
21031 - "5: movl %%eax, 0(%3)\n"
21032 - "6: movl %%edx, 4(%3)\n"
21033 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
21034 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
21035 "7: movl 8(%4), %%eax\n"
21036 "8: movl 12(%4),%%edx\n"
21037 - "9: movl %%eax, 8(%3)\n"
21038 - "10: movl %%edx, 12(%3)\n"
21039 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
21040 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
21041 "11: movl 16(%4), %%eax\n"
21042 "12: movl 20(%4), %%edx\n"
21043 - "13: movl %%eax, 16(%3)\n"
21044 - "14: movl %%edx, 20(%3)\n"
21045 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
21046 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
21047 "15: movl 24(%4), %%eax\n"
21048 "16: movl 28(%4), %%edx\n"
21049 - "17: movl %%eax, 24(%3)\n"
21050 - "18: movl %%edx, 28(%3)\n"
21051 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
21052 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
21053 "19: movl 32(%4), %%eax\n"
21054 "20: movl 36(%4), %%edx\n"
21055 - "21: movl %%eax, 32(%3)\n"
21056 - "22: movl %%edx, 36(%3)\n"
21057 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
21058 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
21059 "23: movl 40(%4), %%eax\n"
21060 "24: movl 44(%4), %%edx\n"
21061 - "25: movl %%eax, 40(%3)\n"
21062 - "26: movl %%edx, 44(%3)\n"
21063 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
21064 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
21065 "27: movl 48(%4), %%eax\n"
21066 "28: movl 52(%4), %%edx\n"
21067 - "29: movl %%eax, 48(%3)\n"
21068 - "30: movl %%edx, 52(%3)\n"
21069 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
21070 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
21071 "31: movl 56(%4), %%eax\n"
21072 "32: movl 60(%4), %%edx\n"
21073 - "33: movl %%eax, 56(%3)\n"
21074 - "34: movl %%edx, 60(%3)\n"
21075 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
21076 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
21077 " addl $-64, %0\n"
21078 " addl $64, %4\n"
21079 " addl $64, %3\n"
21080 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21081 " shrl $2, %0\n"
21082 " andl $3, %%eax\n"
21083 " cld\n"
21084 + __COPYUSER_SET_ES
21085 "99: rep; movsl\n"
21086 "36: movl %%eax, %0\n"
21087 "37: rep; movsb\n"
21088 "100:\n"
21089 + __COPYUSER_RESTORE_ES
21090 + ".section .fixup,\"ax\"\n"
21091 + "101: lea 0(%%eax,%0,4),%0\n"
21092 + " jmp 100b\n"
21093 + ".previous\n"
21094 + ".section __ex_table,\"a\"\n"
21095 + " .align 4\n"
21096 + " .long 1b,100b\n"
21097 + " .long 2b,100b\n"
21098 + " .long 3b,100b\n"
21099 + " .long 4b,100b\n"
21100 + " .long 5b,100b\n"
21101 + " .long 6b,100b\n"
21102 + " .long 7b,100b\n"
21103 + " .long 8b,100b\n"
21104 + " .long 9b,100b\n"
21105 + " .long 10b,100b\n"
21106 + " .long 11b,100b\n"
21107 + " .long 12b,100b\n"
21108 + " .long 13b,100b\n"
21109 + " .long 14b,100b\n"
21110 + " .long 15b,100b\n"
21111 + " .long 16b,100b\n"
21112 + " .long 17b,100b\n"
21113 + " .long 18b,100b\n"
21114 + " .long 19b,100b\n"
21115 + " .long 20b,100b\n"
21116 + " .long 21b,100b\n"
21117 + " .long 22b,100b\n"
21118 + " .long 23b,100b\n"
21119 + " .long 24b,100b\n"
21120 + " .long 25b,100b\n"
21121 + " .long 26b,100b\n"
21122 + " .long 27b,100b\n"
21123 + " .long 28b,100b\n"
21124 + " .long 29b,100b\n"
21125 + " .long 30b,100b\n"
21126 + " .long 31b,100b\n"
21127 + " .long 32b,100b\n"
21128 + " .long 33b,100b\n"
21129 + " .long 34b,100b\n"
21130 + " .long 35b,100b\n"
21131 + " .long 36b,100b\n"
21132 + " .long 37b,100b\n"
21133 + " .long 99b,101b\n"
21134 + ".previous"
21135 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
21136 + : "1"(to), "2"(from), "0"(size)
21137 + : "eax", "edx", "memory");
21138 + return size;
21139 +}
21140 +
21141 +static unsigned long
21142 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
21143 +{
21144 + int d0, d1;
21145 + __asm__ __volatile__(
21146 + " .align 2,0x90\n"
21147 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
21148 + " cmpl $67, %0\n"
21149 + " jbe 3f\n"
21150 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
21151 + " .align 2,0x90\n"
21152 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
21153 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
21154 + "5: movl %%eax, 0(%3)\n"
21155 + "6: movl %%edx, 4(%3)\n"
21156 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
21157 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
21158 + "9: movl %%eax, 8(%3)\n"
21159 + "10: movl %%edx, 12(%3)\n"
21160 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
21161 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
21162 + "13: movl %%eax, 16(%3)\n"
21163 + "14: movl %%edx, 20(%3)\n"
21164 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
21165 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
21166 + "17: movl %%eax, 24(%3)\n"
21167 + "18: movl %%edx, 28(%3)\n"
21168 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
21169 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
21170 + "21: movl %%eax, 32(%3)\n"
21171 + "22: movl %%edx, 36(%3)\n"
21172 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
21173 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
21174 + "25: movl %%eax, 40(%3)\n"
21175 + "26: movl %%edx, 44(%3)\n"
21176 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
21177 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
21178 + "29: movl %%eax, 48(%3)\n"
21179 + "30: movl %%edx, 52(%3)\n"
21180 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
21181 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
21182 + "33: movl %%eax, 56(%3)\n"
21183 + "34: movl %%edx, 60(%3)\n"
21184 + " addl $-64, %0\n"
21185 + " addl $64, %4\n"
21186 + " addl $64, %3\n"
21187 + " cmpl $63, %0\n"
21188 + " ja 1b\n"
21189 + "35: movl %0, %%eax\n"
21190 + " shrl $2, %0\n"
21191 + " andl $3, %%eax\n"
21192 + " cld\n"
21193 + "99: rep; "__copyuser_seg" movsl\n"
21194 + "36: movl %%eax, %0\n"
21195 + "37: rep; "__copyuser_seg" movsb\n"
21196 + "100:\n"
21197 ".section .fixup,\"ax\"\n"
21198 "101: lea 0(%%eax,%0,4),%0\n"
21199 " jmp 100b\n"
21200 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21201 int d0, d1;
21202 __asm__ __volatile__(
21203 " .align 2,0x90\n"
21204 - "0: movl 32(%4), %%eax\n"
21205 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21206 " cmpl $67, %0\n"
21207 " jbe 2f\n"
21208 - "1: movl 64(%4), %%eax\n"
21209 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21210 " .align 2,0x90\n"
21211 - "2: movl 0(%4), %%eax\n"
21212 - "21: movl 4(%4), %%edx\n"
21213 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21214 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21215 " movl %%eax, 0(%3)\n"
21216 " movl %%edx, 4(%3)\n"
21217 - "3: movl 8(%4), %%eax\n"
21218 - "31: movl 12(%4),%%edx\n"
21219 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21220 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21221 " movl %%eax, 8(%3)\n"
21222 " movl %%edx, 12(%3)\n"
21223 - "4: movl 16(%4), %%eax\n"
21224 - "41: movl 20(%4), %%edx\n"
21225 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21226 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21227 " movl %%eax, 16(%3)\n"
21228 " movl %%edx, 20(%3)\n"
21229 - "10: movl 24(%4), %%eax\n"
21230 - "51: movl 28(%4), %%edx\n"
21231 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21232 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21233 " movl %%eax, 24(%3)\n"
21234 " movl %%edx, 28(%3)\n"
21235 - "11: movl 32(%4), %%eax\n"
21236 - "61: movl 36(%4), %%edx\n"
21237 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21238 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21239 " movl %%eax, 32(%3)\n"
21240 " movl %%edx, 36(%3)\n"
21241 - "12: movl 40(%4), %%eax\n"
21242 - "71: movl 44(%4), %%edx\n"
21243 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21244 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21245 " movl %%eax, 40(%3)\n"
21246 " movl %%edx, 44(%3)\n"
21247 - "13: movl 48(%4), %%eax\n"
21248 - "81: movl 52(%4), %%edx\n"
21249 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21250 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21251 " movl %%eax, 48(%3)\n"
21252 " movl %%edx, 52(%3)\n"
21253 - "14: movl 56(%4), %%eax\n"
21254 - "91: movl 60(%4), %%edx\n"
21255 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21256 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21257 " movl %%eax, 56(%3)\n"
21258 " movl %%edx, 60(%3)\n"
21259 " addl $-64, %0\n"
21260 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21261 " shrl $2, %0\n"
21262 " andl $3, %%eax\n"
21263 " cld\n"
21264 - "6: rep; movsl\n"
21265 + "6: rep; "__copyuser_seg" movsl\n"
21266 " movl %%eax,%0\n"
21267 - "7: rep; movsb\n"
21268 + "7: rep; "__copyuser_seg" movsb\n"
21269 "8:\n"
21270 ".section .fixup,\"ax\"\n"
21271 "9: lea 0(%%eax,%0,4),%0\n"
21272 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21273
21274 __asm__ __volatile__(
21275 " .align 2,0x90\n"
21276 - "0: movl 32(%4), %%eax\n"
21277 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21278 " cmpl $67, %0\n"
21279 " jbe 2f\n"
21280 - "1: movl 64(%4), %%eax\n"
21281 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21282 " .align 2,0x90\n"
21283 - "2: movl 0(%4), %%eax\n"
21284 - "21: movl 4(%4), %%edx\n"
21285 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21286 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21287 " movnti %%eax, 0(%3)\n"
21288 " movnti %%edx, 4(%3)\n"
21289 - "3: movl 8(%4), %%eax\n"
21290 - "31: movl 12(%4),%%edx\n"
21291 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21292 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21293 " movnti %%eax, 8(%3)\n"
21294 " movnti %%edx, 12(%3)\n"
21295 - "4: movl 16(%4), %%eax\n"
21296 - "41: movl 20(%4), %%edx\n"
21297 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21298 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21299 " movnti %%eax, 16(%3)\n"
21300 " movnti %%edx, 20(%3)\n"
21301 - "10: movl 24(%4), %%eax\n"
21302 - "51: movl 28(%4), %%edx\n"
21303 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21304 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21305 " movnti %%eax, 24(%3)\n"
21306 " movnti %%edx, 28(%3)\n"
21307 - "11: movl 32(%4), %%eax\n"
21308 - "61: movl 36(%4), %%edx\n"
21309 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21310 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21311 " movnti %%eax, 32(%3)\n"
21312 " movnti %%edx, 36(%3)\n"
21313 - "12: movl 40(%4), %%eax\n"
21314 - "71: movl 44(%4), %%edx\n"
21315 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21316 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21317 " movnti %%eax, 40(%3)\n"
21318 " movnti %%edx, 44(%3)\n"
21319 - "13: movl 48(%4), %%eax\n"
21320 - "81: movl 52(%4), %%edx\n"
21321 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21322 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21323 " movnti %%eax, 48(%3)\n"
21324 " movnti %%edx, 52(%3)\n"
21325 - "14: movl 56(%4), %%eax\n"
21326 - "91: movl 60(%4), %%edx\n"
21327 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21328 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21329 " movnti %%eax, 56(%3)\n"
21330 " movnti %%edx, 60(%3)\n"
21331 " addl $-64, %0\n"
21332 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21333 " shrl $2, %0\n"
21334 " andl $3, %%eax\n"
21335 " cld\n"
21336 - "6: rep; movsl\n"
21337 + "6: rep; "__copyuser_seg" movsl\n"
21338 " movl %%eax,%0\n"
21339 - "7: rep; movsb\n"
21340 + "7: rep; "__copyuser_seg" movsb\n"
21341 "8:\n"
21342 ".section .fixup,\"ax\"\n"
21343 "9: lea 0(%%eax,%0,4),%0\n"
21344 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21345
21346 __asm__ __volatile__(
21347 " .align 2,0x90\n"
21348 - "0: movl 32(%4), %%eax\n"
21349 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21350 " cmpl $67, %0\n"
21351 " jbe 2f\n"
21352 - "1: movl 64(%4), %%eax\n"
21353 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21354 " .align 2,0x90\n"
21355 - "2: movl 0(%4), %%eax\n"
21356 - "21: movl 4(%4), %%edx\n"
21357 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21358 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21359 " movnti %%eax, 0(%3)\n"
21360 " movnti %%edx, 4(%3)\n"
21361 - "3: movl 8(%4), %%eax\n"
21362 - "31: movl 12(%4),%%edx\n"
21363 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21364 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21365 " movnti %%eax, 8(%3)\n"
21366 " movnti %%edx, 12(%3)\n"
21367 - "4: movl 16(%4), %%eax\n"
21368 - "41: movl 20(%4), %%edx\n"
21369 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21370 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21371 " movnti %%eax, 16(%3)\n"
21372 " movnti %%edx, 20(%3)\n"
21373 - "10: movl 24(%4), %%eax\n"
21374 - "51: movl 28(%4), %%edx\n"
21375 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21376 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21377 " movnti %%eax, 24(%3)\n"
21378 " movnti %%edx, 28(%3)\n"
21379 - "11: movl 32(%4), %%eax\n"
21380 - "61: movl 36(%4), %%edx\n"
21381 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21382 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21383 " movnti %%eax, 32(%3)\n"
21384 " movnti %%edx, 36(%3)\n"
21385 - "12: movl 40(%4), %%eax\n"
21386 - "71: movl 44(%4), %%edx\n"
21387 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21388 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21389 " movnti %%eax, 40(%3)\n"
21390 " movnti %%edx, 44(%3)\n"
21391 - "13: movl 48(%4), %%eax\n"
21392 - "81: movl 52(%4), %%edx\n"
21393 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21394 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21395 " movnti %%eax, 48(%3)\n"
21396 " movnti %%edx, 52(%3)\n"
21397 - "14: movl 56(%4), %%eax\n"
21398 - "91: movl 60(%4), %%edx\n"
21399 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21400 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21401 " movnti %%eax, 56(%3)\n"
21402 " movnti %%edx, 60(%3)\n"
21403 " addl $-64, %0\n"
21404 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21405 " shrl $2, %0\n"
21406 " andl $3, %%eax\n"
21407 " cld\n"
21408 - "6: rep; movsl\n"
21409 + "6: rep; "__copyuser_seg" movsl\n"
21410 " movl %%eax,%0\n"
21411 - "7: rep; movsb\n"
21412 + "7: rep; "__copyuser_seg" movsb\n"
21413 "8:\n"
21414 ".section .fixup,\"ax\"\n"
21415 "9: lea 0(%%eax,%0,4),%0\n"
21416 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21417 */
21418 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21419 unsigned long size);
21420 -unsigned long __copy_user_intel(void __user *to, const void *from,
21421 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21422 + unsigned long size);
21423 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21424 unsigned long size);
21425 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21426 const void __user *from, unsigned long size);
21427 #endif /* CONFIG_X86_INTEL_USERCOPY */
21428
21429 /* Generic arbitrary sized copy. */
21430 -#define __copy_user(to, from, size) \
21431 +#define __copy_user(to, from, size, prefix, set, restore) \
21432 do { \
21433 int __d0, __d1, __d2; \
21434 __asm__ __volatile__( \
21435 + set \
21436 " cmp $7,%0\n" \
21437 " jbe 1f\n" \
21438 " movl %1,%0\n" \
21439 " negl %0\n" \
21440 " andl $7,%0\n" \
21441 " subl %0,%3\n" \
21442 - "4: rep; movsb\n" \
21443 + "4: rep; "prefix"movsb\n" \
21444 " movl %3,%0\n" \
21445 " shrl $2,%0\n" \
21446 " andl $3,%3\n" \
21447 " .align 2,0x90\n" \
21448 - "0: rep; movsl\n" \
21449 + "0: rep; "prefix"movsl\n" \
21450 " movl %3,%0\n" \
21451 - "1: rep; movsb\n" \
21452 + "1: rep; "prefix"movsb\n" \
21453 "2:\n" \
21454 + restore \
21455 ".section .fixup,\"ax\"\n" \
21456 "5: addl %3,%0\n" \
21457 " jmp 2b\n" \
21458 @@ -682,14 +799,14 @@ do { \
21459 " negl %0\n" \
21460 " andl $7,%0\n" \
21461 " subl %0,%3\n" \
21462 - "4: rep; movsb\n" \
21463 + "4: rep; "__copyuser_seg"movsb\n" \
21464 " movl %3,%0\n" \
21465 " shrl $2,%0\n" \
21466 " andl $3,%3\n" \
21467 " .align 2,0x90\n" \
21468 - "0: rep; movsl\n" \
21469 + "0: rep; "__copyuser_seg"movsl\n" \
21470 " movl %3,%0\n" \
21471 - "1: rep; movsb\n" \
21472 + "1: rep; "__copyuser_seg"movsb\n" \
21473 "2:\n" \
21474 ".section .fixup,\"ax\"\n" \
21475 "5: addl %3,%0\n" \
21476 @@ -775,9 +892,9 @@ survive:
21477 }
21478 #endif
21479 if (movsl_is_ok(to, from, n))
21480 - __copy_user(to, from, n);
21481 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21482 else
21483 - n = __copy_user_intel(to, from, n);
21484 + n = __generic_copy_to_user_intel(to, from, n);
21485 return n;
21486 }
21487 EXPORT_SYMBOL(__copy_to_user_ll);
21488 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21489 unsigned long n)
21490 {
21491 if (movsl_is_ok(to, from, n))
21492 - __copy_user(to, from, n);
21493 + __copy_user(to, from, n, __copyuser_seg, "", "");
21494 else
21495 - n = __copy_user_intel((void __user *)to,
21496 - (const void *)from, n);
21497 + n = __generic_copy_from_user_intel(to, from, n);
21498 return n;
21499 }
21500 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21501 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21502 if (n > 64 && cpu_has_xmm2)
21503 n = __copy_user_intel_nocache(to, from, n);
21504 else
21505 - __copy_user(to, from, n);
21506 + __copy_user(to, from, n, __copyuser_seg, "", "");
21507 #else
21508 - __copy_user(to, from, n);
21509 + __copy_user(to, from, n, __copyuser_seg, "", "");
21510 #endif
21511 return n;
21512 }
21513 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21514
21515 -/**
21516 - * copy_to_user: - Copy a block of data into user space.
21517 - * @to: Destination address, in user space.
21518 - * @from: Source address, in kernel space.
21519 - * @n: Number of bytes to copy.
21520 - *
21521 - * Context: User context only. This function may sleep.
21522 - *
21523 - * Copy data from kernel space to user space.
21524 - *
21525 - * Returns number of bytes that could not be copied.
21526 - * On success, this will be zero.
21527 - */
21528 -unsigned long
21529 -copy_to_user(void __user *to, const void *from, unsigned long n)
21530 -{
21531 - if (access_ok(VERIFY_WRITE, to, n))
21532 - n = __copy_to_user(to, from, n);
21533 - return n;
21534 -}
21535 -EXPORT_SYMBOL(copy_to_user);
21536 -
21537 -/**
21538 - * copy_from_user: - Copy a block of data from user space.
21539 - * @to: Destination address, in kernel space.
21540 - * @from: Source address, in user space.
21541 - * @n: Number of bytes to copy.
21542 - *
21543 - * Context: User context only. This function may sleep.
21544 - *
21545 - * Copy data from user space to kernel space.
21546 - *
21547 - * Returns number of bytes that could not be copied.
21548 - * On success, this will be zero.
21549 - *
21550 - * If some data could not be copied, this function will pad the copied
21551 - * data to the requested size using zero bytes.
21552 - */
21553 -unsigned long
21554 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21555 -{
21556 - if (access_ok(VERIFY_READ, from, n))
21557 - n = __copy_from_user(to, from, n);
21558 - else
21559 - memset(to, 0, n);
21560 - return n;
21561 -}
21562 -EXPORT_SYMBOL(_copy_from_user);
21563 -
21564 void copy_from_user_overflow(void)
21565 {
21566 WARN(1, "Buffer overflow detected!\n");
21567 }
21568 EXPORT_SYMBOL(copy_from_user_overflow);
21569 +
21570 +void copy_to_user_overflow(void)
21571 +{
21572 + WARN(1, "Buffer overflow detected!\n");
21573 +}
21574 +EXPORT_SYMBOL(copy_to_user_overflow);
21575 +
21576 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21577 +void __set_fs(mm_segment_t x)
21578 +{
21579 + switch (x.seg) {
21580 + case 0:
21581 + loadsegment(gs, 0);
21582 + break;
21583 + case TASK_SIZE_MAX:
21584 + loadsegment(gs, __USER_DS);
21585 + break;
21586 + case -1UL:
21587 + loadsegment(gs, __KERNEL_DS);
21588 + break;
21589 + default:
21590 + BUG();
21591 + }
21592 + return;
21593 +}
21594 +EXPORT_SYMBOL(__set_fs);
21595 +
21596 +void set_fs(mm_segment_t x)
21597 +{
21598 + current_thread_info()->addr_limit = x;
21599 + __set_fs(x);
21600 +}
21601 +EXPORT_SYMBOL(set_fs);
21602 +#endif
21603 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21604 index b7c2849..8633ad8 100644
21605 --- a/arch/x86/lib/usercopy_64.c
21606 +++ b/arch/x86/lib/usercopy_64.c
21607 @@ -42,6 +42,12 @@ long
21608 __strncpy_from_user(char *dst, const char __user *src, long count)
21609 {
21610 long res;
21611 +
21612 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21613 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21614 + src += PAX_USER_SHADOW_BASE;
21615 +#endif
21616 +
21617 __do_strncpy_from_user(dst, src, count, res);
21618 return res;
21619 }
21620 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21621 {
21622 long __d0;
21623 might_fault();
21624 +
21625 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21626 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21627 + addr += PAX_USER_SHADOW_BASE;
21628 +#endif
21629 +
21630 /* no memory constraint because it doesn't change any memory gcc knows
21631 about */
21632 asm volatile(
21633 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21634 }
21635 EXPORT_SYMBOL(strlen_user);
21636
21637 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21638 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21639 {
21640 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21641 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21642 - }
21643 - return len;
21644 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21645 +
21646 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21647 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21648 + to += PAX_USER_SHADOW_BASE;
21649 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21650 + from += PAX_USER_SHADOW_BASE;
21651 +#endif
21652 +
21653 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21654 + }
21655 + return len;
21656 }
21657 EXPORT_SYMBOL(copy_in_user);
21658
21659 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21660 * it is not necessary to optimize tail handling.
21661 */
21662 unsigned long
21663 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21664 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21665 {
21666 char c;
21667 unsigned zero_len;
21668 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21669 index d0474ad..36e9257 100644
21670 --- a/arch/x86/mm/extable.c
21671 +++ b/arch/x86/mm/extable.c
21672 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21673 const struct exception_table_entry *fixup;
21674
21675 #ifdef CONFIG_PNPBIOS
21676 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21677 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21678 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21679 extern u32 pnp_bios_is_utter_crap;
21680 pnp_bios_is_utter_crap = 1;
21681 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21682 index 5db0490..2ddce45 100644
21683 --- a/arch/x86/mm/fault.c
21684 +++ b/arch/x86/mm/fault.c
21685 @@ -13,11 +13,18 @@
21686 #include <linux/perf_event.h> /* perf_sw_event */
21687 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21688 #include <linux/prefetch.h> /* prefetchw */
21689 +#include <linux/unistd.h>
21690 +#include <linux/compiler.h>
21691
21692 #include <asm/traps.h> /* dotraplinkage, ... */
21693 #include <asm/pgalloc.h> /* pgd_*(), ... */
21694 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21695 #include <asm/fixmap.h> /* VSYSCALL_START */
21696 +#include <asm/tlbflush.h>
21697 +
21698 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21699 +#include <asm/stacktrace.h>
21700 +#endif
21701
21702 /*
21703 * Page fault error code bits:
21704 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21705 int ret = 0;
21706
21707 /* kprobe_running() needs smp_processor_id() */
21708 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21709 + if (kprobes_built_in() && !user_mode(regs)) {
21710 preempt_disable();
21711 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21712 ret = 1;
21713 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21714 return !instr_lo || (instr_lo>>1) == 1;
21715 case 0x00:
21716 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21717 - if (probe_kernel_address(instr, opcode))
21718 + if (user_mode(regs)) {
21719 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21720 + return 0;
21721 + } else if (probe_kernel_address(instr, opcode))
21722 return 0;
21723
21724 *prefetch = (instr_lo == 0xF) &&
21725 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21726 while (instr < max_instr) {
21727 unsigned char opcode;
21728
21729 - if (probe_kernel_address(instr, opcode))
21730 + if (user_mode(regs)) {
21731 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21732 + break;
21733 + } else if (probe_kernel_address(instr, opcode))
21734 break;
21735
21736 instr++;
21737 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21738 force_sig_info(si_signo, &info, tsk);
21739 }
21740
21741 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21742 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21743 +#endif
21744 +
21745 +#ifdef CONFIG_PAX_EMUTRAMP
21746 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21747 +#endif
21748 +
21749 +#ifdef CONFIG_PAX_PAGEEXEC
21750 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21751 +{
21752 + pgd_t *pgd;
21753 + pud_t *pud;
21754 + pmd_t *pmd;
21755 +
21756 + pgd = pgd_offset(mm, address);
21757 + if (!pgd_present(*pgd))
21758 + return NULL;
21759 + pud = pud_offset(pgd, address);
21760 + if (!pud_present(*pud))
21761 + return NULL;
21762 + pmd = pmd_offset(pud, address);
21763 + if (!pmd_present(*pmd))
21764 + return NULL;
21765 + return pmd;
21766 +}
21767 +#endif
21768 +
21769 DEFINE_SPINLOCK(pgd_lock);
21770 LIST_HEAD(pgd_list);
21771
21772 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21773 for (address = VMALLOC_START & PMD_MASK;
21774 address >= TASK_SIZE && address < FIXADDR_TOP;
21775 address += PMD_SIZE) {
21776 +
21777 +#ifdef CONFIG_PAX_PER_CPU_PGD
21778 + unsigned long cpu;
21779 +#else
21780 struct page *page;
21781 +#endif
21782
21783 spin_lock(&pgd_lock);
21784 +
21785 +#ifdef CONFIG_PAX_PER_CPU_PGD
21786 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21787 + pgd_t *pgd = get_cpu_pgd(cpu);
21788 + pmd_t *ret;
21789 +#else
21790 list_for_each_entry(page, &pgd_list, lru) {
21791 + pgd_t *pgd = page_address(page);
21792 spinlock_t *pgt_lock;
21793 pmd_t *ret;
21794
21795 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21796 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21797
21798 spin_lock(pgt_lock);
21799 - ret = vmalloc_sync_one(page_address(page), address);
21800 +#endif
21801 +
21802 + ret = vmalloc_sync_one(pgd, address);
21803 +
21804 +#ifndef CONFIG_PAX_PER_CPU_PGD
21805 spin_unlock(pgt_lock);
21806 +#endif
21807
21808 if (!ret)
21809 break;
21810 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21811 * an interrupt in the middle of a task switch..
21812 */
21813 pgd_paddr = read_cr3();
21814 +
21815 +#ifdef CONFIG_PAX_PER_CPU_PGD
21816 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21817 +#endif
21818 +
21819 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21820 if (!pmd_k)
21821 return -1;
21822 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21823 * happen within a race in page table update. In the later
21824 * case just flush:
21825 */
21826 +
21827 +#ifdef CONFIG_PAX_PER_CPU_PGD
21828 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21829 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21830 +#else
21831 pgd = pgd_offset(current->active_mm, address);
21832 +#endif
21833 +
21834 pgd_ref = pgd_offset_k(address);
21835 if (pgd_none(*pgd_ref))
21836 return -1;
21837 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21838 static int is_errata100(struct pt_regs *regs, unsigned long address)
21839 {
21840 #ifdef CONFIG_X86_64
21841 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21842 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21843 return 1;
21844 #endif
21845 return 0;
21846 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21847 }
21848
21849 static const char nx_warning[] = KERN_CRIT
21850 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21851 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21852
21853 static void
21854 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21855 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21856 if (!oops_may_print())
21857 return;
21858
21859 - if (error_code & PF_INSTR) {
21860 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21861 unsigned int level;
21862
21863 pte_t *pte = lookup_address(address, &level);
21864
21865 if (pte && pte_present(*pte) && !pte_exec(*pte))
21866 - printk(nx_warning, current_uid());
21867 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21868 }
21869
21870 +#ifdef CONFIG_PAX_KERNEXEC
21871 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21872 + if (current->signal->curr_ip)
21873 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21874 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21875 + else
21876 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21877 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21878 + }
21879 +#endif
21880 +
21881 printk(KERN_ALERT "BUG: unable to handle kernel ");
21882 if (address < PAGE_SIZE)
21883 printk(KERN_CONT "NULL pointer dereference");
21884 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21885 }
21886 #endif
21887
21888 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21889 + if (pax_is_fetch_fault(regs, error_code, address)) {
21890 +
21891 +#ifdef CONFIG_PAX_EMUTRAMP
21892 + switch (pax_handle_fetch_fault(regs)) {
21893 + case 2:
21894 + return;
21895 + }
21896 +#endif
21897 +
21898 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21899 + do_group_exit(SIGKILL);
21900 + }
21901 +#endif
21902 +
21903 if (unlikely(show_unhandled_signals))
21904 show_signal_msg(regs, error_code, address, tsk);
21905
21906 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21907 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21908 printk(KERN_ERR
21909 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21910 - tsk->comm, tsk->pid, address);
21911 + tsk->comm, task_pid_nr(tsk), address);
21912 code = BUS_MCEERR_AR;
21913 }
21914 #endif
21915 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21916 return 1;
21917 }
21918
21919 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21920 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21921 +{
21922 + pte_t *pte;
21923 + pmd_t *pmd;
21924 + spinlock_t *ptl;
21925 + unsigned char pte_mask;
21926 +
21927 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21928 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21929 + return 0;
21930 +
21931 + /* PaX: it's our fault, let's handle it if we can */
21932 +
21933 + /* PaX: take a look at read faults before acquiring any locks */
21934 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21935 + /* instruction fetch attempt from a protected page in user mode */
21936 + up_read(&mm->mmap_sem);
21937 +
21938 +#ifdef CONFIG_PAX_EMUTRAMP
21939 + switch (pax_handle_fetch_fault(regs)) {
21940 + case 2:
21941 + return 1;
21942 + }
21943 +#endif
21944 +
21945 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21946 + do_group_exit(SIGKILL);
21947 + }
21948 +
21949 + pmd = pax_get_pmd(mm, address);
21950 + if (unlikely(!pmd))
21951 + return 0;
21952 +
21953 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21954 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21955 + pte_unmap_unlock(pte, ptl);
21956 + return 0;
21957 + }
21958 +
21959 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21960 + /* write attempt to a protected page in user mode */
21961 + pte_unmap_unlock(pte, ptl);
21962 + return 0;
21963 + }
21964 +
21965 +#ifdef CONFIG_SMP
21966 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21967 +#else
21968 + if (likely(address > get_limit(regs->cs)))
21969 +#endif
21970 + {
21971 + set_pte(pte, pte_mkread(*pte));
21972 + __flush_tlb_one(address);
21973 + pte_unmap_unlock(pte, ptl);
21974 + up_read(&mm->mmap_sem);
21975 + return 1;
21976 + }
21977 +
21978 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21979 +
21980 + /*
21981 + * PaX: fill DTLB with user rights and retry
21982 + */
21983 + __asm__ __volatile__ (
21984 + "orb %2,(%1)\n"
21985 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21986 +/*
21987 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21988 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21989 + * page fault when examined during a TLB load attempt. this is true not only
21990 + * for PTEs holding a non-present entry but also present entries that will
21991 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21992 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21993 + * for our target pages since their PTEs are simply not in the TLBs at all.
21994 +
21995 + * the best thing in omitting it is that we gain around 15-20% speed in the
21996 + * fast path of the page fault handler and can get rid of tracing since we
21997 + * can no longer flush unintended entries.
21998 + */
21999 + "invlpg (%0)\n"
22000 +#endif
22001 + __copyuser_seg"testb $0,(%0)\n"
22002 + "xorb %3,(%1)\n"
22003 + :
22004 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
22005 + : "memory", "cc");
22006 + pte_unmap_unlock(pte, ptl);
22007 + up_read(&mm->mmap_sem);
22008 + return 1;
22009 +}
22010 +#endif
22011 +
22012 /*
22013 * Handle a spurious fault caused by a stale TLB entry.
22014 *
22015 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
22016 static inline int
22017 access_error(unsigned long error_code, struct vm_area_struct *vma)
22018 {
22019 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
22020 + return 1;
22021 +
22022 if (error_code & PF_WRITE) {
22023 /* write, present and write, not present: */
22024 if (unlikely(!(vma->vm_flags & VM_WRITE)))
22025 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22026 {
22027 struct vm_area_struct *vma;
22028 struct task_struct *tsk;
22029 - unsigned long address;
22030 struct mm_struct *mm;
22031 int fault;
22032 int write = error_code & PF_WRITE;
22033 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
22034 (write ? FAULT_FLAG_WRITE : 0);
22035
22036 - tsk = current;
22037 - mm = tsk->mm;
22038 -
22039 /* Get the faulting address: */
22040 - address = read_cr2();
22041 + unsigned long address = read_cr2();
22042 +
22043 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22044 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
22045 + if (!search_exception_tables(regs->ip)) {
22046 + bad_area_nosemaphore(regs, error_code, address);
22047 + return;
22048 + }
22049 + if (address < PAX_USER_SHADOW_BASE) {
22050 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
22051 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
22052 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
22053 + } else
22054 + address -= PAX_USER_SHADOW_BASE;
22055 + }
22056 +#endif
22057 +
22058 + tsk = current;
22059 + mm = tsk->mm;
22060
22061 /*
22062 * Detect and handle instructions that would cause a page fault for
22063 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22064 * User-mode registers count as a user access even for any
22065 * potential system fault or CPU buglet:
22066 */
22067 - if (user_mode_vm(regs)) {
22068 + if (user_mode(regs)) {
22069 local_irq_enable();
22070 error_code |= PF_USER;
22071 } else {
22072 @@ -1122,6 +1328,11 @@ retry:
22073 might_sleep();
22074 }
22075
22076 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22077 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
22078 + return;
22079 +#endif
22080 +
22081 vma = find_vma(mm, address);
22082 if (unlikely(!vma)) {
22083 bad_area(regs, error_code, address);
22084 @@ -1133,18 +1344,24 @@ retry:
22085 bad_area(regs, error_code, address);
22086 return;
22087 }
22088 - if (error_code & PF_USER) {
22089 - /*
22090 - * Accessing the stack below %sp is always a bug.
22091 - * The large cushion allows instructions like enter
22092 - * and pusha to work. ("enter $65535, $31" pushes
22093 - * 32 pointers and then decrements %sp by 65535.)
22094 - */
22095 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
22096 - bad_area(regs, error_code, address);
22097 - return;
22098 - }
22099 + /*
22100 + * Accessing the stack below %sp is always a bug.
22101 + * The large cushion allows instructions like enter
22102 + * and pusha to work. ("enter $65535, $31" pushes
22103 + * 32 pointers and then decrements %sp by 65535.)
22104 + */
22105 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
22106 + bad_area(regs, error_code, address);
22107 + return;
22108 }
22109 +
22110 +#ifdef CONFIG_PAX_SEGMEXEC
22111 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
22112 + bad_area(regs, error_code, address);
22113 + return;
22114 + }
22115 +#endif
22116 +
22117 if (unlikely(expand_stack(vma, address))) {
22118 bad_area(regs, error_code, address);
22119 return;
22120 @@ -1199,3 +1416,292 @@ good_area:
22121
22122 up_read(&mm->mmap_sem);
22123 }
22124 +
22125 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22126 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
22127 +{
22128 + struct mm_struct *mm = current->mm;
22129 + unsigned long ip = regs->ip;
22130 +
22131 + if (v8086_mode(regs))
22132 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
22133 +
22134 +#ifdef CONFIG_PAX_PAGEEXEC
22135 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
22136 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
22137 + return true;
22138 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
22139 + return true;
22140 + return false;
22141 + }
22142 +#endif
22143 +
22144 +#ifdef CONFIG_PAX_SEGMEXEC
22145 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
22146 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
22147 + return true;
22148 + return false;
22149 + }
22150 +#endif
22151 +
22152 + return false;
22153 +}
22154 +#endif
22155 +
22156 +#ifdef CONFIG_PAX_EMUTRAMP
22157 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
22158 +{
22159 + int err;
22160 +
22161 + do { /* PaX: libffi trampoline emulation */
22162 + unsigned char mov, jmp;
22163 + unsigned int addr1, addr2;
22164 +
22165 +#ifdef CONFIG_X86_64
22166 + if ((regs->ip + 9) >> 32)
22167 + break;
22168 +#endif
22169 +
22170 + err = get_user(mov, (unsigned char __user *)regs->ip);
22171 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22172 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22173 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22174 +
22175 + if (err)
22176 + break;
22177 +
22178 + if (mov == 0xB8 && jmp == 0xE9) {
22179 + regs->ax = addr1;
22180 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22181 + return 2;
22182 + }
22183 + } while (0);
22184 +
22185 + do { /* PaX: gcc trampoline emulation #1 */
22186 + unsigned char mov1, mov2;
22187 + unsigned short jmp;
22188 + unsigned int addr1, addr2;
22189 +
22190 +#ifdef CONFIG_X86_64
22191 + if ((regs->ip + 11) >> 32)
22192 + break;
22193 +#endif
22194 +
22195 + err = get_user(mov1, (unsigned char __user *)regs->ip);
22196 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22197 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
22198 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22199 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
22200 +
22201 + if (err)
22202 + break;
22203 +
22204 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
22205 + regs->cx = addr1;
22206 + regs->ax = addr2;
22207 + regs->ip = addr2;
22208 + return 2;
22209 + }
22210 + } while (0);
22211 +
22212 + do { /* PaX: gcc trampoline emulation #2 */
22213 + unsigned char mov, jmp;
22214 + unsigned int addr1, addr2;
22215 +
22216 +#ifdef CONFIG_X86_64
22217 + if ((regs->ip + 9) >> 32)
22218 + break;
22219 +#endif
22220 +
22221 + err = get_user(mov, (unsigned char __user *)regs->ip);
22222 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22223 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22224 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22225 +
22226 + if (err)
22227 + break;
22228 +
22229 + if (mov == 0xB9 && jmp == 0xE9) {
22230 + regs->cx = addr1;
22231 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22232 + return 2;
22233 + }
22234 + } while (0);
22235 +
22236 + return 1; /* PaX in action */
22237 +}
22238 +
22239 +#ifdef CONFIG_X86_64
22240 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
22241 +{
22242 + int err;
22243 +
22244 + do { /* PaX: libffi trampoline emulation */
22245 + unsigned short mov1, mov2, jmp1;
22246 + unsigned char stcclc, jmp2;
22247 + unsigned long addr1, addr2;
22248 +
22249 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22250 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22251 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22252 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22253 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22254 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22255 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22256 +
22257 + if (err)
22258 + break;
22259 +
22260 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22261 + regs->r11 = addr1;
22262 + regs->r10 = addr2;
22263 + if (stcclc == 0xF8)
22264 + regs->flags &= ~X86_EFLAGS_CF;
22265 + else
22266 + regs->flags |= X86_EFLAGS_CF;
22267 + regs->ip = addr1;
22268 + return 2;
22269 + }
22270 + } while (0);
22271 +
22272 + do { /* PaX: gcc trampoline emulation #1 */
22273 + unsigned short mov1, mov2, jmp1;
22274 + unsigned char jmp2;
22275 + unsigned int addr1;
22276 + unsigned long addr2;
22277 +
22278 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22279 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22280 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22281 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22282 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22283 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22284 +
22285 + if (err)
22286 + break;
22287 +
22288 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22289 + regs->r11 = addr1;
22290 + regs->r10 = addr2;
22291 + regs->ip = addr1;
22292 + return 2;
22293 + }
22294 + } while (0);
22295 +
22296 + do { /* PaX: gcc trampoline emulation #2 */
22297 + unsigned short mov1, mov2, jmp1;
22298 + unsigned char jmp2;
22299 + unsigned long addr1, addr2;
22300 +
22301 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22302 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22303 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22304 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22305 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22306 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22307 +
22308 + if (err)
22309 + break;
22310 +
22311 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22312 + regs->r11 = addr1;
22313 + regs->r10 = addr2;
22314 + regs->ip = addr1;
22315 + return 2;
22316 + }
22317 + } while (0);
22318 +
22319 + return 1; /* PaX in action */
22320 +}
22321 +#endif
22322 +
22323 +/*
22324 + * PaX: decide what to do with offenders (regs->ip = fault address)
22325 + *
22326 + * returns 1 when task should be killed
22327 + * 2 when gcc trampoline was detected
22328 + */
22329 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22330 +{
22331 + if (v8086_mode(regs))
22332 + return 1;
22333 +
22334 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22335 + return 1;
22336 +
22337 +#ifdef CONFIG_X86_32
22338 + return pax_handle_fetch_fault_32(regs);
22339 +#else
22340 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22341 + return pax_handle_fetch_fault_32(regs);
22342 + else
22343 + return pax_handle_fetch_fault_64(regs);
22344 +#endif
22345 +}
22346 +#endif
22347 +
22348 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22349 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22350 +{
22351 + long i;
22352 +
22353 + printk(KERN_ERR "PAX: bytes at PC: ");
22354 + for (i = 0; i < 20; i++) {
22355 + unsigned char c;
22356 + if (get_user(c, (unsigned char __force_user *)pc+i))
22357 + printk(KERN_CONT "?? ");
22358 + else
22359 + printk(KERN_CONT "%02x ", c);
22360 + }
22361 + printk("\n");
22362 +
22363 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22364 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22365 + unsigned long c;
22366 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22367 +#ifdef CONFIG_X86_32
22368 + printk(KERN_CONT "???????? ");
22369 +#else
22370 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22371 + printk(KERN_CONT "???????? ???????? ");
22372 + else
22373 + printk(KERN_CONT "???????????????? ");
22374 +#endif
22375 + } else {
22376 +#ifdef CONFIG_X86_64
22377 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22378 + printk(KERN_CONT "%08x ", (unsigned int)c);
22379 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22380 + } else
22381 +#endif
22382 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22383 + }
22384 + }
22385 + printk("\n");
22386 +}
22387 +#endif
22388 +
22389 +/**
22390 + * probe_kernel_write(): safely attempt to write to a location
22391 + * @dst: address to write to
22392 + * @src: pointer to the data that shall be written
22393 + * @size: size of the data chunk
22394 + *
22395 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22396 + * happens, handle that and return -EFAULT.
22397 + */
22398 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22399 +{
22400 + long ret;
22401 + mm_segment_t old_fs = get_fs();
22402 +
22403 + set_fs(KERNEL_DS);
22404 + pagefault_disable();
22405 + pax_open_kernel();
22406 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22407 + pax_close_kernel();
22408 + pagefault_enable();
22409 + set_fs(old_fs);
22410 +
22411 + return ret ? -EFAULT : 0;
22412 +}
22413 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22414 index dd74e46..7d26398 100644
22415 --- a/arch/x86/mm/gup.c
22416 +++ b/arch/x86/mm/gup.c
22417 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22418 addr = start;
22419 len = (unsigned long) nr_pages << PAGE_SHIFT;
22420 end = start + len;
22421 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22422 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22423 (void __user *)start, len)))
22424 return 0;
22425
22426 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22427 index f4f29b1..5cac4fb 100644
22428 --- a/arch/x86/mm/highmem_32.c
22429 +++ b/arch/x86/mm/highmem_32.c
22430 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22431 idx = type + KM_TYPE_NR*smp_processor_id();
22432 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22433 BUG_ON(!pte_none(*(kmap_pte-idx)));
22434 +
22435 + pax_open_kernel();
22436 set_pte(kmap_pte-idx, mk_pte(page, prot));
22437 + pax_close_kernel();
22438 +
22439 arch_flush_lazy_mmu_mode();
22440
22441 return (void *)vaddr;
22442 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22443 index f581a18..29efd37 100644
22444 --- a/arch/x86/mm/hugetlbpage.c
22445 +++ b/arch/x86/mm/hugetlbpage.c
22446 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22447 struct hstate *h = hstate_file(file);
22448 struct mm_struct *mm = current->mm;
22449 struct vm_area_struct *vma;
22450 - unsigned long start_addr;
22451 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22452 +
22453 +#ifdef CONFIG_PAX_SEGMEXEC
22454 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22455 + pax_task_size = SEGMEXEC_TASK_SIZE;
22456 +#endif
22457 +
22458 + pax_task_size -= PAGE_SIZE;
22459
22460 if (len > mm->cached_hole_size) {
22461 - start_addr = mm->free_area_cache;
22462 + start_addr = mm->free_area_cache;
22463 } else {
22464 - start_addr = TASK_UNMAPPED_BASE;
22465 - mm->cached_hole_size = 0;
22466 + start_addr = mm->mmap_base;
22467 + mm->cached_hole_size = 0;
22468 }
22469
22470 full_search:
22471 @@ -280,26 +287,27 @@ full_search:
22472
22473 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22474 /* At this point: (!vma || addr < vma->vm_end). */
22475 - if (TASK_SIZE - len < addr) {
22476 + if (pax_task_size - len < addr) {
22477 /*
22478 * Start a new search - just in case we missed
22479 * some holes.
22480 */
22481 - if (start_addr != TASK_UNMAPPED_BASE) {
22482 - start_addr = TASK_UNMAPPED_BASE;
22483 + if (start_addr != mm->mmap_base) {
22484 + start_addr = mm->mmap_base;
22485 mm->cached_hole_size = 0;
22486 goto full_search;
22487 }
22488 return -ENOMEM;
22489 }
22490 - if (!vma || addr + len <= vma->vm_start) {
22491 - mm->free_area_cache = addr + len;
22492 - return addr;
22493 - }
22494 + if (check_heap_stack_gap(vma, addr, len))
22495 + break;
22496 if (addr + mm->cached_hole_size < vma->vm_start)
22497 mm->cached_hole_size = vma->vm_start - addr;
22498 addr = ALIGN(vma->vm_end, huge_page_size(h));
22499 }
22500 +
22501 + mm->free_area_cache = addr + len;
22502 + return addr;
22503 }
22504
22505 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22506 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22507 {
22508 struct hstate *h = hstate_file(file);
22509 struct mm_struct *mm = current->mm;
22510 - struct vm_area_struct *vma, *prev_vma;
22511 - unsigned long base = mm->mmap_base, addr = addr0;
22512 + struct vm_area_struct *vma;
22513 + unsigned long base = mm->mmap_base, addr;
22514 unsigned long largest_hole = mm->cached_hole_size;
22515 - int first_time = 1;
22516
22517 /* don't allow allocations above current base */
22518 if (mm->free_area_cache > base)
22519 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22520 largest_hole = 0;
22521 mm->free_area_cache = base;
22522 }
22523 -try_again:
22524 +
22525 /* make sure it can fit in the remaining address space */
22526 if (mm->free_area_cache < len)
22527 goto fail;
22528
22529 /* either no address requested or can't fit in requested address hole */
22530 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22531 + addr = (mm->free_area_cache - len);
22532 do {
22533 + addr &= huge_page_mask(h);
22534 + vma = find_vma(mm, addr);
22535 /*
22536 * Lookup failure means no vma is above this address,
22537 * i.e. return with success:
22538 - */
22539 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22540 - return addr;
22541 -
22542 - /*
22543 * new region fits between prev_vma->vm_end and
22544 * vma->vm_start, use it:
22545 */
22546 - if (addr + len <= vma->vm_start &&
22547 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22548 + if (check_heap_stack_gap(vma, addr, len)) {
22549 /* remember the address as a hint for next time */
22550 - mm->cached_hole_size = largest_hole;
22551 - return (mm->free_area_cache = addr);
22552 - } else {
22553 - /* pull free_area_cache down to the first hole */
22554 - if (mm->free_area_cache == vma->vm_end) {
22555 - mm->free_area_cache = vma->vm_start;
22556 - mm->cached_hole_size = largest_hole;
22557 - }
22558 + mm->cached_hole_size = largest_hole;
22559 + return (mm->free_area_cache = addr);
22560 + }
22561 + /* pull free_area_cache down to the first hole */
22562 + if (mm->free_area_cache == vma->vm_end) {
22563 + mm->free_area_cache = vma->vm_start;
22564 + mm->cached_hole_size = largest_hole;
22565 }
22566
22567 /* remember the largest hole we saw so far */
22568 if (addr + largest_hole < vma->vm_start)
22569 - largest_hole = vma->vm_start - addr;
22570 + largest_hole = vma->vm_start - addr;
22571
22572 /* try just below the current vma->vm_start */
22573 - addr = (vma->vm_start - len) & huge_page_mask(h);
22574 - } while (len <= vma->vm_start);
22575 + addr = skip_heap_stack_gap(vma, len);
22576 + } while (!IS_ERR_VALUE(addr));
22577
22578 fail:
22579 /*
22580 - * if hint left us with no space for the requested
22581 - * mapping then try again:
22582 - */
22583 - if (first_time) {
22584 - mm->free_area_cache = base;
22585 - largest_hole = 0;
22586 - first_time = 0;
22587 - goto try_again;
22588 - }
22589 - /*
22590 * A failed mmap() very likely causes application failure,
22591 * so fall back to the bottom-up function here. This scenario
22592 * can happen with large stack limits and large mmap()
22593 * allocations.
22594 */
22595 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22596 +
22597 +#ifdef CONFIG_PAX_SEGMEXEC
22598 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22599 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22600 + else
22601 +#endif
22602 +
22603 + mm->mmap_base = TASK_UNMAPPED_BASE;
22604 +
22605 +#ifdef CONFIG_PAX_RANDMMAP
22606 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22607 + mm->mmap_base += mm->delta_mmap;
22608 +#endif
22609 +
22610 + mm->free_area_cache = mm->mmap_base;
22611 mm->cached_hole_size = ~0UL;
22612 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22613 len, pgoff, flags);
22614 @@ -386,6 +392,7 @@ fail:
22615 /*
22616 * Restore the topdown base:
22617 */
22618 + mm->mmap_base = base;
22619 mm->free_area_cache = base;
22620 mm->cached_hole_size = ~0UL;
22621
22622 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22623 struct hstate *h = hstate_file(file);
22624 struct mm_struct *mm = current->mm;
22625 struct vm_area_struct *vma;
22626 + unsigned long pax_task_size = TASK_SIZE;
22627
22628 if (len & ~huge_page_mask(h))
22629 return -EINVAL;
22630 - if (len > TASK_SIZE)
22631 +
22632 +#ifdef CONFIG_PAX_SEGMEXEC
22633 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22634 + pax_task_size = SEGMEXEC_TASK_SIZE;
22635 +#endif
22636 +
22637 + pax_task_size -= PAGE_SIZE;
22638 +
22639 + if (len > pax_task_size)
22640 return -ENOMEM;
22641
22642 if (flags & MAP_FIXED) {
22643 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22644 if (addr) {
22645 addr = ALIGN(addr, huge_page_size(h));
22646 vma = find_vma(mm, addr);
22647 - if (TASK_SIZE - len >= addr &&
22648 - (!vma || addr + len <= vma->vm_start))
22649 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22650 return addr;
22651 }
22652 if (mm->get_unmapped_area == arch_get_unmapped_area)
22653 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22654 index 87488b9..399f416 100644
22655 --- a/arch/x86/mm/init.c
22656 +++ b/arch/x86/mm/init.c
22657 @@ -15,6 +15,7 @@
22658 #include <asm/tlbflush.h>
22659 #include <asm/tlb.h>
22660 #include <asm/proto.h>
22661 +#include <asm/desc.h>
22662
22663 unsigned long __initdata pgt_buf_start;
22664 unsigned long __meminitdata pgt_buf_end;
22665 @@ -31,7 +32,7 @@ int direct_gbpages
22666 static void __init find_early_table_space(unsigned long end, int use_pse,
22667 int use_gbpages)
22668 {
22669 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22670 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22671 phys_addr_t base;
22672
22673 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22674 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22675 */
22676 int devmem_is_allowed(unsigned long pagenr)
22677 {
22678 +#ifdef CONFIG_GRKERNSEC_KMEM
22679 + /* allow BDA */
22680 + if (!pagenr)
22681 + return 1;
22682 + /* allow EBDA */
22683 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22684 + return 1;
22685 +#else
22686 + if (!pagenr)
22687 + return 1;
22688 +#ifdef CONFIG_VM86
22689 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22690 + return 1;
22691 +#endif
22692 +#endif
22693 +
22694 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22695 + return 1;
22696 +#ifdef CONFIG_GRKERNSEC_KMEM
22697 + /* throw out everything else below 1MB */
22698 if (pagenr <= 256)
22699 - return 1;
22700 + return 0;
22701 +#endif
22702 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22703 return 0;
22704 if (!page_is_ram(pagenr))
22705 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22706
22707 void free_initmem(void)
22708 {
22709 +
22710 +#ifdef CONFIG_PAX_KERNEXEC
22711 +#ifdef CONFIG_X86_32
22712 + /* PaX: limit KERNEL_CS to actual size */
22713 + unsigned long addr, limit;
22714 + struct desc_struct d;
22715 + int cpu;
22716 +
22717 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22718 + limit = (limit - 1UL) >> PAGE_SHIFT;
22719 +
22720 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22721 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22722 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22723 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22724 + }
22725 +
22726 + /* PaX: make KERNEL_CS read-only */
22727 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22728 + if (!paravirt_enabled())
22729 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22730 +/*
22731 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22732 + pgd = pgd_offset_k(addr);
22733 + pud = pud_offset(pgd, addr);
22734 + pmd = pmd_offset(pud, addr);
22735 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22736 + }
22737 +*/
22738 +#ifdef CONFIG_X86_PAE
22739 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22740 +/*
22741 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22742 + pgd = pgd_offset_k(addr);
22743 + pud = pud_offset(pgd, addr);
22744 + pmd = pmd_offset(pud, addr);
22745 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22746 + }
22747 +*/
22748 +#endif
22749 +
22750 +#ifdef CONFIG_MODULES
22751 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22752 +#endif
22753 +
22754 +#else
22755 + pgd_t *pgd;
22756 + pud_t *pud;
22757 + pmd_t *pmd;
22758 + unsigned long addr, end;
22759 +
22760 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22761 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22762 + pgd = pgd_offset_k(addr);
22763 + pud = pud_offset(pgd, addr);
22764 + pmd = pmd_offset(pud, addr);
22765 + if (!pmd_present(*pmd))
22766 + continue;
22767 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22768 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22769 + else
22770 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22771 + }
22772 +
22773 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22774 + end = addr + KERNEL_IMAGE_SIZE;
22775 + for (; addr < end; addr += PMD_SIZE) {
22776 + pgd = pgd_offset_k(addr);
22777 + pud = pud_offset(pgd, addr);
22778 + pmd = pmd_offset(pud, addr);
22779 + if (!pmd_present(*pmd))
22780 + continue;
22781 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22782 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22783 + }
22784 +#endif
22785 +
22786 + flush_tlb_all();
22787 +#endif
22788 +
22789 free_init_pages("unused kernel memory",
22790 (unsigned long)(&__init_begin),
22791 (unsigned long)(&__init_end));
22792 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22793 index 29f7c6d..b46b35b 100644
22794 --- a/arch/x86/mm/init_32.c
22795 +++ b/arch/x86/mm/init_32.c
22796 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22797 }
22798
22799 /*
22800 - * Creates a middle page table and puts a pointer to it in the
22801 - * given global directory entry. This only returns the gd entry
22802 - * in non-PAE compilation mode, since the middle layer is folded.
22803 - */
22804 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22805 -{
22806 - pud_t *pud;
22807 - pmd_t *pmd_table;
22808 -
22809 -#ifdef CONFIG_X86_PAE
22810 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22811 - if (after_bootmem)
22812 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22813 - else
22814 - pmd_table = (pmd_t *)alloc_low_page();
22815 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22816 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22817 - pud = pud_offset(pgd, 0);
22818 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22819 -
22820 - return pmd_table;
22821 - }
22822 -#endif
22823 - pud = pud_offset(pgd, 0);
22824 - pmd_table = pmd_offset(pud, 0);
22825 -
22826 - return pmd_table;
22827 -}
22828 -
22829 -/*
22830 * Create a page table and place a pointer to it in a middle page
22831 * directory entry:
22832 */
22833 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22834 page_table = (pte_t *)alloc_low_page();
22835
22836 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22837 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22838 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22839 +#else
22840 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22841 +#endif
22842 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22843 }
22844
22845 return pte_offset_kernel(pmd, 0);
22846 }
22847
22848 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22849 +{
22850 + pud_t *pud;
22851 + pmd_t *pmd_table;
22852 +
22853 + pud = pud_offset(pgd, 0);
22854 + pmd_table = pmd_offset(pud, 0);
22855 +
22856 + return pmd_table;
22857 +}
22858 +
22859 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22860 {
22861 int pgd_idx = pgd_index(vaddr);
22862 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22863 int pgd_idx, pmd_idx;
22864 unsigned long vaddr;
22865 pgd_t *pgd;
22866 + pud_t *pud;
22867 pmd_t *pmd;
22868 pte_t *pte = NULL;
22869
22870 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22871 pgd = pgd_base + pgd_idx;
22872
22873 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22874 - pmd = one_md_table_init(pgd);
22875 - pmd = pmd + pmd_index(vaddr);
22876 + pud = pud_offset(pgd, vaddr);
22877 + pmd = pmd_offset(pud, vaddr);
22878 +
22879 +#ifdef CONFIG_X86_PAE
22880 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22881 +#endif
22882 +
22883 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22884 pmd++, pmd_idx++) {
22885 pte = page_table_kmap_check(one_page_table_init(pmd),
22886 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22887 }
22888 }
22889
22890 -static inline int is_kernel_text(unsigned long addr)
22891 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22892 {
22893 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22894 - return 1;
22895 - return 0;
22896 + if ((start > ktla_ktva((unsigned long)_etext) ||
22897 + end <= ktla_ktva((unsigned long)_stext)) &&
22898 + (start > ktla_ktva((unsigned long)_einittext) ||
22899 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22900 +
22901 +#ifdef CONFIG_ACPI_SLEEP
22902 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22903 +#endif
22904 +
22905 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22906 + return 0;
22907 + return 1;
22908 }
22909
22910 /*
22911 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22912 unsigned long last_map_addr = end;
22913 unsigned long start_pfn, end_pfn;
22914 pgd_t *pgd_base = swapper_pg_dir;
22915 - int pgd_idx, pmd_idx, pte_ofs;
22916 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22917 unsigned long pfn;
22918 pgd_t *pgd;
22919 + pud_t *pud;
22920 pmd_t *pmd;
22921 pte_t *pte;
22922 unsigned pages_2m, pages_4k;
22923 @@ -281,8 +282,13 @@ repeat:
22924 pfn = start_pfn;
22925 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22926 pgd = pgd_base + pgd_idx;
22927 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22928 - pmd = one_md_table_init(pgd);
22929 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22930 + pud = pud_offset(pgd, 0);
22931 + pmd = pmd_offset(pud, 0);
22932 +
22933 +#ifdef CONFIG_X86_PAE
22934 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22935 +#endif
22936
22937 if (pfn >= end_pfn)
22938 continue;
22939 @@ -294,14 +300,13 @@ repeat:
22940 #endif
22941 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22942 pmd++, pmd_idx++) {
22943 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22944 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22945
22946 /*
22947 * Map with big pages if possible, otherwise
22948 * create normal page tables:
22949 */
22950 if (use_pse) {
22951 - unsigned int addr2;
22952 pgprot_t prot = PAGE_KERNEL_LARGE;
22953 /*
22954 * first pass will use the same initial
22955 @@ -311,11 +316,7 @@ repeat:
22956 __pgprot(PTE_IDENT_ATTR |
22957 _PAGE_PSE);
22958
22959 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22960 - PAGE_OFFSET + PAGE_SIZE-1;
22961 -
22962 - if (is_kernel_text(addr) ||
22963 - is_kernel_text(addr2))
22964 + if (is_kernel_text(address, address + PMD_SIZE))
22965 prot = PAGE_KERNEL_LARGE_EXEC;
22966
22967 pages_2m++;
22968 @@ -332,7 +333,7 @@ repeat:
22969 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22970 pte += pte_ofs;
22971 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22972 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22973 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22974 pgprot_t prot = PAGE_KERNEL;
22975 /*
22976 * first pass will use the same initial
22977 @@ -340,7 +341,7 @@ repeat:
22978 */
22979 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22980
22981 - if (is_kernel_text(addr))
22982 + if (is_kernel_text(address, address + PAGE_SIZE))
22983 prot = PAGE_KERNEL_EXEC;
22984
22985 pages_4k++;
22986 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22987
22988 pud = pud_offset(pgd, va);
22989 pmd = pmd_offset(pud, va);
22990 - if (!pmd_present(*pmd))
22991 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22992 break;
22993
22994 pte = pte_offset_kernel(pmd, va);
22995 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22996
22997 static void __init pagetable_init(void)
22998 {
22999 - pgd_t *pgd_base = swapper_pg_dir;
23000 -
23001 - permanent_kmaps_init(pgd_base);
23002 + permanent_kmaps_init(swapper_pg_dir);
23003 }
23004
23005 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
23006 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
23007 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23008
23009 /* user-defined highmem size */
23010 @@ -757,6 +756,12 @@ void __init mem_init(void)
23011
23012 pci_iommu_alloc();
23013
23014 +#ifdef CONFIG_PAX_PER_CPU_PGD
23015 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23016 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23017 + KERNEL_PGD_PTRS);
23018 +#endif
23019 +
23020 #ifdef CONFIG_FLATMEM
23021 BUG_ON(!mem_map);
23022 #endif
23023 @@ -774,7 +779,7 @@ void __init mem_init(void)
23024 set_highmem_pages_init();
23025
23026 codesize = (unsigned long) &_etext - (unsigned long) &_text;
23027 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
23028 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
23029 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
23030
23031 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
23032 @@ -815,10 +820,10 @@ void __init mem_init(void)
23033 ((unsigned long)&__init_end -
23034 (unsigned long)&__init_begin) >> 10,
23035
23036 - (unsigned long)&_etext, (unsigned long)&_edata,
23037 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
23038 + (unsigned long)&_sdata, (unsigned long)&_edata,
23039 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
23040
23041 - (unsigned long)&_text, (unsigned long)&_etext,
23042 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
23043 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
23044
23045 /*
23046 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
23047 if (!kernel_set_to_readonly)
23048 return;
23049
23050 + start = ktla_ktva(start);
23051 pr_debug("Set kernel text: %lx - %lx for read write\n",
23052 start, start+size);
23053
23054 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
23055 if (!kernel_set_to_readonly)
23056 return;
23057
23058 + start = ktla_ktva(start);
23059 pr_debug("Set kernel text: %lx - %lx for read only\n",
23060 start, start+size);
23061
23062 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
23063 unsigned long start = PFN_ALIGN(_text);
23064 unsigned long size = PFN_ALIGN(_etext) - start;
23065
23066 + start = ktla_ktva(start);
23067 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
23068 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
23069 size >> 10);
23070 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
23071 index bbaaa00..796fa65 100644
23072 --- a/arch/x86/mm/init_64.c
23073 +++ b/arch/x86/mm/init_64.c
23074 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
23075 * around without checking the pgd every time.
23076 */
23077
23078 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
23079 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
23080 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23081
23082 int force_personality32;
23083 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23084
23085 for (address = start; address <= end; address += PGDIR_SIZE) {
23086 const pgd_t *pgd_ref = pgd_offset_k(address);
23087 +
23088 +#ifdef CONFIG_PAX_PER_CPU_PGD
23089 + unsigned long cpu;
23090 +#else
23091 struct page *page;
23092 +#endif
23093
23094 if (pgd_none(*pgd_ref))
23095 continue;
23096
23097 spin_lock(&pgd_lock);
23098 +
23099 +#ifdef CONFIG_PAX_PER_CPU_PGD
23100 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23101 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23102 +#else
23103 list_for_each_entry(page, &pgd_list, lru) {
23104 pgd_t *pgd;
23105 spinlock_t *pgt_lock;
23106 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23107 /* the pgt_lock only for Xen */
23108 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23109 spin_lock(pgt_lock);
23110 +#endif
23111
23112 if (pgd_none(*pgd))
23113 set_pgd(pgd, *pgd_ref);
23114 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23115 BUG_ON(pgd_page_vaddr(*pgd)
23116 != pgd_page_vaddr(*pgd_ref));
23117
23118 +#ifndef CONFIG_PAX_PER_CPU_PGD
23119 spin_unlock(pgt_lock);
23120 +#endif
23121 +
23122 }
23123 spin_unlock(&pgd_lock);
23124 }
23125 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
23126 pmd = fill_pmd(pud, vaddr);
23127 pte = fill_pte(pmd, vaddr);
23128
23129 + pax_open_kernel();
23130 set_pte(pte, new_pte);
23131 + pax_close_kernel();
23132
23133 /*
23134 * It's enough to flush this one mapping.
23135 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
23136 pgd = pgd_offset_k((unsigned long)__va(phys));
23137 if (pgd_none(*pgd)) {
23138 pud = (pud_t *) spp_getpage();
23139 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
23140 - _PAGE_USER));
23141 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
23142 }
23143 pud = pud_offset(pgd, (unsigned long)__va(phys));
23144 if (pud_none(*pud)) {
23145 pmd = (pmd_t *) spp_getpage();
23146 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
23147 - _PAGE_USER));
23148 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
23149 }
23150 pmd = pmd_offset(pud, phys);
23151 BUG_ON(!pmd_none(*pmd));
23152 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
23153 if (pfn >= pgt_buf_top)
23154 panic("alloc_low_page: ran out of memory");
23155
23156 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23157 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23158 clear_page(adr);
23159 *phys = pfn * PAGE_SIZE;
23160 return adr;
23161 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
23162
23163 phys = __pa(virt);
23164 left = phys & (PAGE_SIZE - 1);
23165 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23166 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23167 adr = (void *)(((unsigned long)adr) | left);
23168
23169 return adr;
23170 @@ -693,6 +707,12 @@ void __init mem_init(void)
23171
23172 pci_iommu_alloc();
23173
23174 +#ifdef CONFIG_PAX_PER_CPU_PGD
23175 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23176 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23177 + KERNEL_PGD_PTRS);
23178 +#endif
23179 +
23180 /* clear_bss() already clear the empty_zero_page */
23181
23182 reservedpages = 0;
23183 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
23184 static struct vm_area_struct gate_vma = {
23185 .vm_start = VSYSCALL_START,
23186 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
23187 - .vm_page_prot = PAGE_READONLY_EXEC,
23188 - .vm_flags = VM_READ | VM_EXEC
23189 + .vm_page_prot = PAGE_READONLY,
23190 + .vm_flags = VM_READ
23191 };
23192
23193 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
23194 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
23195
23196 const char *arch_vma_name(struct vm_area_struct *vma)
23197 {
23198 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23199 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23200 return "[vdso]";
23201 if (vma == &gate_vma)
23202 return "[vsyscall]";
23203 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
23204 index 7b179b4..6bd1777 100644
23205 --- a/arch/x86/mm/iomap_32.c
23206 +++ b/arch/x86/mm/iomap_32.c
23207 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
23208 type = kmap_atomic_idx_push();
23209 idx = type + KM_TYPE_NR * smp_processor_id();
23210 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23211 +
23212 + pax_open_kernel();
23213 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
23214 + pax_close_kernel();
23215 +
23216 arch_flush_lazy_mmu_mode();
23217
23218 return (void *)vaddr;
23219 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
23220 index be1ef57..55f0160 100644
23221 --- a/arch/x86/mm/ioremap.c
23222 +++ b/arch/x86/mm/ioremap.c
23223 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
23224 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
23225 int is_ram = page_is_ram(pfn);
23226
23227 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
23228 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
23229 return NULL;
23230 WARN_ON_ONCE(is_ram);
23231 }
23232 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
23233
23234 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
23235 if (page_is_ram(start >> PAGE_SHIFT))
23236 +#ifdef CONFIG_HIGHMEM
23237 + if ((start >> PAGE_SHIFT) < max_low_pfn)
23238 +#endif
23239 return __va(phys);
23240
23241 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
23242 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23243 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23244
23245 static __initdata int after_paging_init;
23246 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23247 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23248
23249 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23250 {
23251 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23252 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23253
23254 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23255 - memset(bm_pte, 0, sizeof(bm_pte));
23256 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
23257 + pmd_populate_user(&init_mm, pmd, bm_pte);
23258
23259 /*
23260 * The boot-ioremap range spans multiple pmds, for which
23261 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23262 index d87dd6d..bf3fa66 100644
23263 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23264 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23265 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23266 * memory (e.g. tracked pages)? For now, we need this to avoid
23267 * invoking kmemcheck for PnP BIOS calls.
23268 */
23269 - if (regs->flags & X86_VM_MASK)
23270 + if (v8086_mode(regs))
23271 return false;
23272 - if (regs->cs != __KERNEL_CS)
23273 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23274 return false;
23275
23276 pte = kmemcheck_pte_lookup(address);
23277 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23278 index 845df68..1d8d29f 100644
23279 --- a/arch/x86/mm/mmap.c
23280 +++ b/arch/x86/mm/mmap.c
23281 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23282 * Leave an at least ~128 MB hole with possible stack randomization.
23283 */
23284 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23285 -#define MAX_GAP (TASK_SIZE/6*5)
23286 +#define MAX_GAP (pax_task_size/6*5)
23287
23288 static int mmap_is_legacy(void)
23289 {
23290 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23291 return rnd << PAGE_SHIFT;
23292 }
23293
23294 -static unsigned long mmap_base(void)
23295 +static unsigned long mmap_base(struct mm_struct *mm)
23296 {
23297 unsigned long gap = rlimit(RLIMIT_STACK);
23298 + unsigned long pax_task_size = TASK_SIZE;
23299 +
23300 +#ifdef CONFIG_PAX_SEGMEXEC
23301 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23302 + pax_task_size = SEGMEXEC_TASK_SIZE;
23303 +#endif
23304
23305 if (gap < MIN_GAP)
23306 gap = MIN_GAP;
23307 else if (gap > MAX_GAP)
23308 gap = MAX_GAP;
23309
23310 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23311 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23312 }
23313
23314 /*
23315 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23316 * does, but not when emulating X86_32
23317 */
23318 -static unsigned long mmap_legacy_base(void)
23319 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23320 {
23321 - if (mmap_is_ia32())
23322 + if (mmap_is_ia32()) {
23323 +
23324 +#ifdef CONFIG_PAX_SEGMEXEC
23325 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23326 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23327 + else
23328 +#endif
23329 +
23330 return TASK_UNMAPPED_BASE;
23331 - else
23332 + } else
23333 return TASK_UNMAPPED_BASE + mmap_rnd();
23334 }
23335
23336 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23337 void arch_pick_mmap_layout(struct mm_struct *mm)
23338 {
23339 if (mmap_is_legacy()) {
23340 - mm->mmap_base = mmap_legacy_base();
23341 + mm->mmap_base = mmap_legacy_base(mm);
23342 +
23343 +#ifdef CONFIG_PAX_RANDMMAP
23344 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23345 + mm->mmap_base += mm->delta_mmap;
23346 +#endif
23347 +
23348 mm->get_unmapped_area = arch_get_unmapped_area;
23349 mm->unmap_area = arch_unmap_area;
23350 } else {
23351 - mm->mmap_base = mmap_base();
23352 + mm->mmap_base = mmap_base(mm);
23353 +
23354 +#ifdef CONFIG_PAX_RANDMMAP
23355 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23356 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23357 +#endif
23358 +
23359 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23360 mm->unmap_area = arch_unmap_area_topdown;
23361 }
23362 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23363 index de54b9b..799051e 100644
23364 --- a/arch/x86/mm/mmio-mod.c
23365 +++ b/arch/x86/mm/mmio-mod.c
23366 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23367 break;
23368 default:
23369 {
23370 - unsigned char *ip = (unsigned char *)instptr;
23371 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23372 my_trace->opcode = MMIO_UNKNOWN_OP;
23373 my_trace->width = 0;
23374 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23375 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23376 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23377 void __iomem *addr)
23378 {
23379 - static atomic_t next_id;
23380 + static atomic_unchecked_t next_id;
23381 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23382 /* These are page-unaligned. */
23383 struct mmiotrace_map map = {
23384 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23385 .private = trace
23386 },
23387 .phys = offset,
23388 - .id = atomic_inc_return(&next_id)
23389 + .id = atomic_inc_return_unchecked(&next_id)
23390 };
23391 map.map_id = trace->id;
23392
23393 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23394 index b008656..773eac2 100644
23395 --- a/arch/x86/mm/pageattr-test.c
23396 +++ b/arch/x86/mm/pageattr-test.c
23397 @@ -36,7 +36,7 @@ enum {
23398
23399 static int pte_testbit(pte_t pte)
23400 {
23401 - return pte_flags(pte) & _PAGE_UNUSED1;
23402 + return pte_flags(pte) & _PAGE_CPA_TEST;
23403 }
23404
23405 struct split_state {
23406 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23407 index f9e5267..77b1a40 100644
23408 --- a/arch/x86/mm/pageattr.c
23409 +++ b/arch/x86/mm/pageattr.c
23410 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23411 */
23412 #ifdef CONFIG_PCI_BIOS
23413 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23414 - pgprot_val(forbidden) |= _PAGE_NX;
23415 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23416 #endif
23417
23418 /*
23419 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23420 * Does not cover __inittext since that is gone later on. On
23421 * 64bit we do not enforce !NX on the low mapping
23422 */
23423 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23424 - pgprot_val(forbidden) |= _PAGE_NX;
23425 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23426 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23427
23428 +#ifdef CONFIG_DEBUG_RODATA
23429 /*
23430 * The .rodata section needs to be read-only. Using the pfn
23431 * catches all aliases.
23432 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23433 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23434 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23435 pgprot_val(forbidden) |= _PAGE_RW;
23436 +#endif
23437
23438 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23439 /*
23440 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23441 }
23442 #endif
23443
23444 +#ifdef CONFIG_PAX_KERNEXEC
23445 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23446 + pgprot_val(forbidden) |= _PAGE_RW;
23447 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23448 + }
23449 +#endif
23450 +
23451 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23452
23453 return prot;
23454 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23455 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23456 {
23457 /* change init_mm */
23458 + pax_open_kernel();
23459 set_pte_atomic(kpte, pte);
23460 +
23461 #ifdef CONFIG_X86_32
23462 if (!SHARED_KERNEL_PMD) {
23463 +
23464 +#ifdef CONFIG_PAX_PER_CPU_PGD
23465 + unsigned long cpu;
23466 +#else
23467 struct page *page;
23468 +#endif
23469
23470 +#ifdef CONFIG_PAX_PER_CPU_PGD
23471 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23472 + pgd_t *pgd = get_cpu_pgd(cpu);
23473 +#else
23474 list_for_each_entry(page, &pgd_list, lru) {
23475 - pgd_t *pgd;
23476 + pgd_t *pgd = (pgd_t *)page_address(page);
23477 +#endif
23478 +
23479 pud_t *pud;
23480 pmd_t *pmd;
23481
23482 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23483 + pgd += pgd_index(address);
23484 pud = pud_offset(pgd, address);
23485 pmd = pmd_offset(pud, address);
23486 set_pte_atomic((pte_t *)pmd, pte);
23487 }
23488 }
23489 #endif
23490 + pax_close_kernel();
23491 }
23492
23493 static int
23494 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23495 index f6ff57b..481690f 100644
23496 --- a/arch/x86/mm/pat.c
23497 +++ b/arch/x86/mm/pat.c
23498 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23499
23500 if (!entry) {
23501 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23502 - current->comm, current->pid, start, end);
23503 + current->comm, task_pid_nr(current), start, end);
23504 return -EINVAL;
23505 }
23506
23507 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23508 while (cursor < to) {
23509 if (!devmem_is_allowed(pfn)) {
23510 printk(KERN_INFO
23511 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23512 - current->comm, from, to);
23513 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23514 + current->comm, from, to, cursor);
23515 return 0;
23516 }
23517 cursor += PAGE_SIZE;
23518 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23519 printk(KERN_INFO
23520 "%s:%d ioremap_change_attr failed %s "
23521 "for %Lx-%Lx\n",
23522 - current->comm, current->pid,
23523 + current->comm, task_pid_nr(current),
23524 cattr_name(flags),
23525 base, (unsigned long long)(base + size));
23526 return -EINVAL;
23527 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23528 if (want_flags != flags) {
23529 printk(KERN_WARNING
23530 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23531 - current->comm, current->pid,
23532 + current->comm, task_pid_nr(current),
23533 cattr_name(want_flags),
23534 (unsigned long long)paddr,
23535 (unsigned long long)(paddr + size),
23536 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23537 free_memtype(paddr, paddr + size);
23538 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23539 " for %Lx-%Lx, got %s\n",
23540 - current->comm, current->pid,
23541 + current->comm, task_pid_nr(current),
23542 cattr_name(want_flags),
23543 (unsigned long long)paddr,
23544 (unsigned long long)(paddr + size),
23545 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23546 index 9f0614d..92ae64a 100644
23547 --- a/arch/x86/mm/pf_in.c
23548 +++ b/arch/x86/mm/pf_in.c
23549 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23550 int i;
23551 enum reason_type rv = OTHERS;
23552
23553 - p = (unsigned char *)ins_addr;
23554 + p = (unsigned char *)ktla_ktva(ins_addr);
23555 p += skip_prefix(p, &prf);
23556 p += get_opcode(p, &opcode);
23557
23558 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23559 struct prefix_bits prf;
23560 int i;
23561
23562 - p = (unsigned char *)ins_addr;
23563 + p = (unsigned char *)ktla_ktva(ins_addr);
23564 p += skip_prefix(p, &prf);
23565 p += get_opcode(p, &opcode);
23566
23567 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23568 struct prefix_bits prf;
23569 int i;
23570
23571 - p = (unsigned char *)ins_addr;
23572 + p = (unsigned char *)ktla_ktva(ins_addr);
23573 p += skip_prefix(p, &prf);
23574 p += get_opcode(p, &opcode);
23575
23576 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23577 struct prefix_bits prf;
23578 int i;
23579
23580 - p = (unsigned char *)ins_addr;
23581 + p = (unsigned char *)ktla_ktva(ins_addr);
23582 p += skip_prefix(p, &prf);
23583 p += get_opcode(p, &opcode);
23584 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23585 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23586 struct prefix_bits prf;
23587 int i;
23588
23589 - p = (unsigned char *)ins_addr;
23590 + p = (unsigned char *)ktla_ktva(ins_addr);
23591 p += skip_prefix(p, &prf);
23592 p += get_opcode(p, &opcode);
23593 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23594 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23595 index 8573b83..c3b1a30 100644
23596 --- a/arch/x86/mm/pgtable.c
23597 +++ b/arch/x86/mm/pgtable.c
23598 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23599 list_del(&page->lru);
23600 }
23601
23602 -#define UNSHARED_PTRS_PER_PGD \
23603 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23605 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23606
23607 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23608 +{
23609 + while (count--)
23610 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23611 +}
23612 +#endif
23613
23614 +#ifdef CONFIG_PAX_PER_CPU_PGD
23615 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23616 +{
23617 + while (count--)
23618 +
23619 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23620 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23621 +#else
23622 + *dst++ = *src++;
23623 +#endif
23624 +
23625 +}
23626 +#endif
23627 +
23628 +#ifdef CONFIG_X86_64
23629 +#define pxd_t pud_t
23630 +#define pyd_t pgd_t
23631 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23632 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23633 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23634 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
23635 +#define PYD_SIZE PGDIR_SIZE
23636 +#else
23637 +#define pxd_t pmd_t
23638 +#define pyd_t pud_t
23639 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23640 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23641 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23642 +#define pyd_offset(mm, address) pud_offset((mm), (address))
23643 +#define PYD_SIZE PUD_SIZE
23644 +#endif
23645 +
23646 +#ifdef CONFIG_PAX_PER_CPU_PGD
23647 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23648 +static inline void pgd_dtor(pgd_t *pgd) {}
23649 +#else
23650 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23651 {
23652 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23653 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23654 pgd_list_del(pgd);
23655 spin_unlock(&pgd_lock);
23656 }
23657 +#endif
23658
23659 /*
23660 * List of all pgd's needed for non-PAE so it can invalidate entries
23661 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23662 * -- wli
23663 */
23664
23665 -#ifdef CONFIG_X86_PAE
23666 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23667 /*
23668 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23669 * updating the top-level pagetable entries to guarantee the
23670 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23671 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23672 * and initialize the kernel pmds here.
23673 */
23674 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23675 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23676
23677 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23678 {
23679 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23680 */
23681 flush_tlb_mm(mm);
23682 }
23683 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23684 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23685 #else /* !CONFIG_X86_PAE */
23686
23687 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23688 -#define PREALLOCATED_PMDS 0
23689 +#define PREALLOCATED_PXDS 0
23690
23691 #endif /* CONFIG_X86_PAE */
23692
23693 -static void free_pmds(pmd_t *pmds[])
23694 +static void free_pxds(pxd_t *pxds[])
23695 {
23696 int i;
23697
23698 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23699 - if (pmds[i])
23700 - free_page((unsigned long)pmds[i]);
23701 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23702 + if (pxds[i])
23703 + free_page((unsigned long)pxds[i]);
23704 }
23705
23706 -static int preallocate_pmds(pmd_t *pmds[])
23707 +static int preallocate_pxds(pxd_t *pxds[])
23708 {
23709 int i;
23710 bool failed = false;
23711
23712 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23713 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23714 - if (pmd == NULL)
23715 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23716 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23717 + if (pxd == NULL)
23718 failed = true;
23719 - pmds[i] = pmd;
23720 + pxds[i] = pxd;
23721 }
23722
23723 if (failed) {
23724 - free_pmds(pmds);
23725 + free_pxds(pxds);
23726 return -ENOMEM;
23727 }
23728
23729 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23730 * preallocate which never got a corresponding vma will need to be
23731 * freed manually.
23732 */
23733 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23734 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23735 {
23736 int i;
23737
23738 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23739 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23740 pgd_t pgd = pgdp[i];
23741
23742 if (pgd_val(pgd) != 0) {
23743 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23744 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23745
23746 - pgdp[i] = native_make_pgd(0);
23747 + set_pgd(pgdp + i, native_make_pgd(0));
23748
23749 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23750 - pmd_free(mm, pmd);
23751 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23752 + pxd_free(mm, pxd);
23753 }
23754 }
23755 }
23756
23757 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23758 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23759 {
23760 - pud_t *pud;
23761 + pyd_t *pyd;
23762 unsigned long addr;
23763 int i;
23764
23765 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23766 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23767 return;
23768
23769 - pud = pud_offset(pgd, 0);
23770 +#ifdef CONFIG_X86_64
23771 + pyd = pyd_offset(mm, 0L);
23772 +#else
23773 + pyd = pyd_offset(pgd, 0L);
23774 +#endif
23775
23776 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23777 - i++, pud++, addr += PUD_SIZE) {
23778 - pmd_t *pmd = pmds[i];
23779 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23780 + i++, pyd++, addr += PYD_SIZE) {
23781 + pxd_t *pxd = pxds[i];
23782
23783 if (i >= KERNEL_PGD_BOUNDARY)
23784 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23785 - sizeof(pmd_t) * PTRS_PER_PMD);
23786 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23787 + sizeof(pxd_t) * PTRS_PER_PMD);
23788
23789 - pud_populate(mm, pud, pmd);
23790 + pyd_populate(mm, pyd, pxd);
23791 }
23792 }
23793
23794 pgd_t *pgd_alloc(struct mm_struct *mm)
23795 {
23796 pgd_t *pgd;
23797 - pmd_t *pmds[PREALLOCATED_PMDS];
23798 + pxd_t *pxds[PREALLOCATED_PXDS];
23799
23800 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23801
23802 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23803
23804 mm->pgd = pgd;
23805
23806 - if (preallocate_pmds(pmds) != 0)
23807 + if (preallocate_pxds(pxds) != 0)
23808 goto out_free_pgd;
23809
23810 if (paravirt_pgd_alloc(mm) != 0)
23811 - goto out_free_pmds;
23812 + goto out_free_pxds;
23813
23814 /*
23815 * Make sure that pre-populating the pmds is atomic with
23816 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23817 spin_lock(&pgd_lock);
23818
23819 pgd_ctor(mm, pgd);
23820 - pgd_prepopulate_pmd(mm, pgd, pmds);
23821 + pgd_prepopulate_pxd(mm, pgd, pxds);
23822
23823 spin_unlock(&pgd_lock);
23824
23825 return pgd;
23826
23827 -out_free_pmds:
23828 - free_pmds(pmds);
23829 +out_free_pxds:
23830 + free_pxds(pxds);
23831 out_free_pgd:
23832 free_page((unsigned long)pgd);
23833 out:
23834 @@ -295,7 +344,7 @@ out:
23835
23836 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23837 {
23838 - pgd_mop_up_pmds(mm, pgd);
23839 + pgd_mop_up_pxds(mm, pgd);
23840 pgd_dtor(pgd);
23841 paravirt_pgd_free(mm, pgd);
23842 free_page((unsigned long)pgd);
23843 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23844 index cac7184..09a39fa 100644
23845 --- a/arch/x86/mm/pgtable_32.c
23846 +++ b/arch/x86/mm/pgtable_32.c
23847 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23848 return;
23849 }
23850 pte = pte_offset_kernel(pmd, vaddr);
23851 +
23852 + pax_open_kernel();
23853 if (pte_val(pteval))
23854 set_pte_at(&init_mm, vaddr, pte, pteval);
23855 else
23856 pte_clear(&init_mm, vaddr, pte);
23857 + pax_close_kernel();
23858
23859 /*
23860 * It's enough to flush this one mapping.
23861 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23862 index 410531d..0f16030 100644
23863 --- a/arch/x86/mm/setup_nx.c
23864 +++ b/arch/x86/mm/setup_nx.c
23865 @@ -5,8 +5,10 @@
23866 #include <asm/pgtable.h>
23867 #include <asm/proto.h>
23868
23869 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23870 static int disable_nx __cpuinitdata;
23871
23872 +#ifndef CONFIG_PAX_PAGEEXEC
23873 /*
23874 * noexec = on|off
23875 *
23876 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23877 return 0;
23878 }
23879 early_param("noexec", noexec_setup);
23880 +#endif
23881 +
23882 +#endif
23883
23884 void __cpuinit x86_configure_nx(void)
23885 {
23886 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23887 if (cpu_has_nx && !disable_nx)
23888 __supported_pte_mask |= _PAGE_NX;
23889 else
23890 +#endif
23891 __supported_pte_mask &= ~_PAGE_NX;
23892 }
23893
23894 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23895 index d6c0418..06a0ad5 100644
23896 --- a/arch/x86/mm/tlb.c
23897 +++ b/arch/x86/mm/tlb.c
23898 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23899 BUG();
23900 cpumask_clear_cpu(cpu,
23901 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23902 +
23903 +#ifndef CONFIG_PAX_PER_CPU_PGD
23904 load_cr3(swapper_pg_dir);
23905 +#endif
23906 +
23907 }
23908 EXPORT_SYMBOL_GPL(leave_mm);
23909
23910 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23911 index 6687022..ceabcfa 100644
23912 --- a/arch/x86/net/bpf_jit.S
23913 +++ b/arch/x86/net/bpf_jit.S
23914 @@ -9,6 +9,7 @@
23915 */
23916 #include <linux/linkage.h>
23917 #include <asm/dwarf2.h>
23918 +#include <asm/alternative-asm.h>
23919
23920 /*
23921 * Calling convention :
23922 @@ -35,6 +36,7 @@ sk_load_word:
23923 jle bpf_slow_path_word
23924 mov (SKBDATA,%rsi),%eax
23925 bswap %eax /* ntohl() */
23926 + pax_force_retaddr
23927 ret
23928
23929
23930 @@ -53,6 +55,7 @@ sk_load_half:
23931 jle bpf_slow_path_half
23932 movzwl (SKBDATA,%rsi),%eax
23933 rol $8,%ax # ntohs()
23934 + pax_force_retaddr
23935 ret
23936
23937 sk_load_byte_ind:
23938 @@ -66,6 +69,7 @@ sk_load_byte:
23939 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23940 jle bpf_slow_path_byte
23941 movzbl (SKBDATA,%rsi),%eax
23942 + pax_force_retaddr
23943 ret
23944
23945 /**
23946 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23947 movzbl (SKBDATA,%rsi),%ebx
23948 and $15,%bl
23949 shl $2,%bl
23950 + pax_force_retaddr
23951 ret
23952 CFI_ENDPROC
23953 ENDPROC(sk_load_byte_msh)
23954 @@ -91,6 +96,7 @@ bpf_error:
23955 xor %eax,%eax
23956 mov -8(%rbp),%rbx
23957 leaveq
23958 + pax_force_retaddr
23959 ret
23960
23961 /* rsi contains offset and can be scratched */
23962 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23963 js bpf_error
23964 mov -12(%rbp),%eax
23965 bswap %eax
23966 + pax_force_retaddr
23967 ret
23968
23969 bpf_slow_path_half:
23970 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23971 mov -12(%rbp),%ax
23972 rol $8,%ax
23973 movzwl %ax,%eax
23974 + pax_force_retaddr
23975 ret
23976
23977 bpf_slow_path_byte:
23978 bpf_slow_path_common(1)
23979 js bpf_error
23980 movzbl -12(%rbp),%eax
23981 + pax_force_retaddr
23982 ret
23983
23984 bpf_slow_path_byte_msh:
23985 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23986 and $15,%al
23987 shl $2,%al
23988 xchg %eax,%ebx
23989 + pax_force_retaddr
23990 ret
23991 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23992 index 7c1b765..8c072c6 100644
23993 --- a/arch/x86/net/bpf_jit_comp.c
23994 +++ b/arch/x86/net/bpf_jit_comp.c
23995 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23996 set_fs(old_fs);
23997 }
23998
23999 +struct bpf_jit_work {
24000 + struct work_struct work;
24001 + void *image;
24002 +};
24003
24004 void bpf_jit_compile(struct sk_filter *fp)
24005 {
24006 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
24007 if (addrs == NULL)
24008 return;
24009
24010 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
24011 + if (!fp->work)
24012 + goto out;
24013 +
24014 /* Before first pass, make a rough estimation of addrs[]
24015 * each bpf instruction is translated to less than 64 bytes
24016 */
24017 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
24018 func = sk_load_word;
24019 common_load: seen |= SEEN_DATAREF;
24020 if ((int)K < 0)
24021 - goto out;
24022 + goto error;
24023 t_offset = func - (image + addrs[i]);
24024 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
24025 EMIT1_off32(0xe8, t_offset); /* call */
24026 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24027 break;
24028 default:
24029 /* hmm, too complex filter, give up with jit compiler */
24030 - goto out;
24031 + goto error;
24032 }
24033 ilen = prog - temp;
24034 if (image) {
24035 if (unlikely(proglen + ilen > oldproglen)) {
24036 pr_err("bpb_jit_compile fatal error\n");
24037 - kfree(addrs);
24038 - module_free(NULL, image);
24039 - return;
24040 + module_free_exec(NULL, image);
24041 + goto error;
24042 }
24043 + pax_open_kernel();
24044 memcpy(image + proglen, temp, ilen);
24045 + pax_close_kernel();
24046 }
24047 proglen += ilen;
24048 addrs[i] = proglen;
24049 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24050 break;
24051 }
24052 if (proglen == oldproglen) {
24053 - image = module_alloc(max_t(unsigned int,
24054 - proglen,
24055 - sizeof(struct work_struct)));
24056 + image = module_alloc_exec(proglen);
24057 if (!image)
24058 - goto out;
24059 + goto error;
24060 }
24061 oldproglen = proglen;
24062 }
24063 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24064 bpf_flush_icache(image, image + proglen);
24065
24066 fp->bpf_func = (void *)image;
24067 - }
24068 + } else
24069 +error:
24070 + kfree(fp->work);
24071 +
24072 out:
24073 kfree(addrs);
24074 return;
24075 @@ -645,18 +655,20 @@ out:
24076
24077 static void jit_free_defer(struct work_struct *arg)
24078 {
24079 - module_free(NULL, arg);
24080 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
24081 + kfree(arg);
24082 }
24083
24084 /* run from softirq, we must use a work_struct to call
24085 - * module_free() from process context
24086 + * module_free_exec() from process context
24087 */
24088 void bpf_jit_free(struct sk_filter *fp)
24089 {
24090 if (fp->bpf_func != sk_run_filter) {
24091 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
24092 + struct work_struct *work = &fp->work->work;
24093
24094 INIT_WORK(work, jit_free_defer);
24095 + fp->work->image = fp->bpf_func;
24096 schedule_work(work);
24097 }
24098 }
24099 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
24100 index bff89df..377758a 100644
24101 --- a/arch/x86/oprofile/backtrace.c
24102 +++ b/arch/x86/oprofile/backtrace.c
24103 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
24104 struct stack_frame_ia32 *fp;
24105 unsigned long bytes;
24106
24107 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24108 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24109 if (bytes != sizeof(bufhead))
24110 return NULL;
24111
24112 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
24113 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
24114
24115 oprofile_add_trace(bufhead[0].return_address);
24116
24117 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
24118 struct stack_frame bufhead[2];
24119 unsigned long bytes;
24120
24121 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24122 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24123 if (bytes != sizeof(bufhead))
24124 return NULL;
24125
24126 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
24127 {
24128 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
24129
24130 - if (!user_mode_vm(regs)) {
24131 + if (!user_mode(regs)) {
24132 unsigned long stack = kernel_stack_pointer(regs);
24133 if (depth)
24134 dump_trace(NULL, regs, (unsigned long *)stack, 0,
24135 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
24136 index cb29191..036766d 100644
24137 --- a/arch/x86/pci/mrst.c
24138 +++ b/arch/x86/pci/mrst.c
24139 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
24140 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
24141 pci_mmcfg_late_init();
24142 pcibios_enable_irq = mrst_pci_irq_enable;
24143 - pci_root_ops = pci_mrst_ops;
24144 + pax_open_kernel();
24145 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
24146 + pax_close_kernel();
24147 /* Continue with standard init */
24148 return 1;
24149 }
24150 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
24151 index db0e9a5..0372c14 100644
24152 --- a/arch/x86/pci/pcbios.c
24153 +++ b/arch/x86/pci/pcbios.c
24154 @@ -79,50 +79,93 @@ union bios32 {
24155 static struct {
24156 unsigned long address;
24157 unsigned short segment;
24158 -} bios32_indirect = { 0, __KERNEL_CS };
24159 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
24160
24161 /*
24162 * Returns the entry point for the given service, NULL on error
24163 */
24164
24165 -static unsigned long bios32_service(unsigned long service)
24166 +static unsigned long __devinit bios32_service(unsigned long service)
24167 {
24168 unsigned char return_code; /* %al */
24169 unsigned long address; /* %ebx */
24170 unsigned long length; /* %ecx */
24171 unsigned long entry; /* %edx */
24172 unsigned long flags;
24173 + struct desc_struct d, *gdt;
24174
24175 local_irq_save(flags);
24176 - __asm__("lcall *(%%edi); cld"
24177 +
24178 + gdt = get_cpu_gdt_table(smp_processor_id());
24179 +
24180 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
24181 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24182 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
24183 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24184 +
24185 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
24186 : "=a" (return_code),
24187 "=b" (address),
24188 "=c" (length),
24189 "=d" (entry)
24190 : "0" (service),
24191 "1" (0),
24192 - "D" (&bios32_indirect));
24193 + "D" (&bios32_indirect),
24194 + "r"(__PCIBIOS_DS)
24195 + : "memory");
24196 +
24197 + pax_open_kernel();
24198 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
24199 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
24200 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
24201 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
24202 + pax_close_kernel();
24203 +
24204 local_irq_restore(flags);
24205
24206 switch (return_code) {
24207 - case 0:
24208 - return address + entry;
24209 - case 0x80: /* Not present */
24210 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24211 - return 0;
24212 - default: /* Shouldn't happen */
24213 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24214 - service, return_code);
24215 + case 0: {
24216 + int cpu;
24217 + unsigned char flags;
24218 +
24219 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
24220 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
24221 + printk(KERN_WARNING "bios32_service: not valid\n");
24222 return 0;
24223 + }
24224 + address = address + PAGE_OFFSET;
24225 + length += 16UL; /* some BIOSs underreport this... */
24226 + flags = 4;
24227 + if (length >= 64*1024*1024) {
24228 + length >>= PAGE_SHIFT;
24229 + flags |= 8;
24230 + }
24231 +
24232 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24233 + gdt = get_cpu_gdt_table(cpu);
24234 + pack_descriptor(&d, address, length, 0x9b, flags);
24235 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24236 + pack_descriptor(&d, address, length, 0x93, flags);
24237 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24238 + }
24239 + return entry;
24240 + }
24241 + case 0x80: /* Not present */
24242 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24243 + return 0;
24244 + default: /* Shouldn't happen */
24245 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24246 + service, return_code);
24247 + return 0;
24248 }
24249 }
24250
24251 static struct {
24252 unsigned long address;
24253 unsigned short segment;
24254 -} pci_indirect = { 0, __KERNEL_CS };
24255 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24256
24257 -static int pci_bios_present;
24258 +static int pci_bios_present __read_only;
24259
24260 static int __devinit check_pcibios(void)
24261 {
24262 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24263 unsigned long flags, pcibios_entry;
24264
24265 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24266 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24267 + pci_indirect.address = pcibios_entry;
24268
24269 local_irq_save(flags);
24270 - __asm__(
24271 - "lcall *(%%edi); cld\n\t"
24272 + __asm__("movw %w6, %%ds\n\t"
24273 + "lcall *%%ss:(%%edi); cld\n\t"
24274 + "push %%ss\n\t"
24275 + "pop %%ds\n\t"
24276 "jc 1f\n\t"
24277 "xor %%ah, %%ah\n"
24278 "1:"
24279 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24280 "=b" (ebx),
24281 "=c" (ecx)
24282 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24283 - "D" (&pci_indirect)
24284 + "D" (&pci_indirect),
24285 + "r" (__PCIBIOS_DS)
24286 : "memory");
24287 local_irq_restore(flags);
24288
24289 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24290
24291 switch (len) {
24292 case 1:
24293 - __asm__("lcall *(%%esi); cld\n\t"
24294 + __asm__("movw %w6, %%ds\n\t"
24295 + "lcall *%%ss:(%%esi); cld\n\t"
24296 + "push %%ss\n\t"
24297 + "pop %%ds\n\t"
24298 "jc 1f\n\t"
24299 "xor %%ah, %%ah\n"
24300 "1:"
24301 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24302 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24303 "b" (bx),
24304 "D" ((long)reg),
24305 - "S" (&pci_indirect));
24306 + "S" (&pci_indirect),
24307 + "r" (__PCIBIOS_DS));
24308 /*
24309 * Zero-extend the result beyond 8 bits, do not trust the
24310 * BIOS having done it:
24311 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24312 *value &= 0xff;
24313 break;
24314 case 2:
24315 - __asm__("lcall *(%%esi); cld\n\t"
24316 + __asm__("movw %w6, %%ds\n\t"
24317 + "lcall *%%ss:(%%esi); cld\n\t"
24318 + "push %%ss\n\t"
24319 + "pop %%ds\n\t"
24320 "jc 1f\n\t"
24321 "xor %%ah, %%ah\n"
24322 "1:"
24323 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24324 : "1" (PCIBIOS_READ_CONFIG_WORD),
24325 "b" (bx),
24326 "D" ((long)reg),
24327 - "S" (&pci_indirect));
24328 + "S" (&pci_indirect),
24329 + "r" (__PCIBIOS_DS));
24330 /*
24331 * Zero-extend the result beyond 16 bits, do not trust the
24332 * BIOS having done it:
24333 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24334 *value &= 0xffff;
24335 break;
24336 case 4:
24337 - __asm__("lcall *(%%esi); cld\n\t"
24338 + __asm__("movw %w6, %%ds\n\t"
24339 + "lcall *%%ss:(%%esi); cld\n\t"
24340 + "push %%ss\n\t"
24341 + "pop %%ds\n\t"
24342 "jc 1f\n\t"
24343 "xor %%ah, %%ah\n"
24344 "1:"
24345 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24346 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24347 "b" (bx),
24348 "D" ((long)reg),
24349 - "S" (&pci_indirect));
24350 + "S" (&pci_indirect),
24351 + "r" (__PCIBIOS_DS));
24352 break;
24353 }
24354
24355 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24356
24357 switch (len) {
24358 case 1:
24359 - __asm__("lcall *(%%esi); cld\n\t"
24360 + __asm__("movw %w6, %%ds\n\t"
24361 + "lcall *%%ss:(%%esi); cld\n\t"
24362 + "push %%ss\n\t"
24363 + "pop %%ds\n\t"
24364 "jc 1f\n\t"
24365 "xor %%ah, %%ah\n"
24366 "1:"
24367 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24368 "c" (value),
24369 "b" (bx),
24370 "D" ((long)reg),
24371 - "S" (&pci_indirect));
24372 + "S" (&pci_indirect),
24373 + "r" (__PCIBIOS_DS));
24374 break;
24375 case 2:
24376 - __asm__("lcall *(%%esi); cld\n\t"
24377 + __asm__("movw %w6, %%ds\n\t"
24378 + "lcall *%%ss:(%%esi); cld\n\t"
24379 + "push %%ss\n\t"
24380 + "pop %%ds\n\t"
24381 "jc 1f\n\t"
24382 "xor %%ah, %%ah\n"
24383 "1:"
24384 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24385 "c" (value),
24386 "b" (bx),
24387 "D" ((long)reg),
24388 - "S" (&pci_indirect));
24389 + "S" (&pci_indirect),
24390 + "r" (__PCIBIOS_DS));
24391 break;
24392 case 4:
24393 - __asm__("lcall *(%%esi); cld\n\t"
24394 + __asm__("movw %w6, %%ds\n\t"
24395 + "lcall *%%ss:(%%esi); cld\n\t"
24396 + "push %%ss\n\t"
24397 + "pop %%ds\n\t"
24398 "jc 1f\n\t"
24399 "xor %%ah, %%ah\n"
24400 "1:"
24401 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24402 "c" (value),
24403 "b" (bx),
24404 "D" ((long)reg),
24405 - "S" (&pci_indirect));
24406 + "S" (&pci_indirect),
24407 + "r" (__PCIBIOS_DS));
24408 break;
24409 }
24410
24411 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24412
24413 DBG("PCI: Fetching IRQ routing table... ");
24414 __asm__("push %%es\n\t"
24415 + "movw %w8, %%ds\n\t"
24416 "push %%ds\n\t"
24417 "pop %%es\n\t"
24418 - "lcall *(%%esi); cld\n\t"
24419 + "lcall *%%ss:(%%esi); cld\n\t"
24420 "pop %%es\n\t"
24421 + "push %%ss\n\t"
24422 + "pop %%ds\n"
24423 "jc 1f\n\t"
24424 "xor %%ah, %%ah\n"
24425 "1:"
24426 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24427 "1" (0),
24428 "D" ((long) &opt),
24429 "S" (&pci_indirect),
24430 - "m" (opt)
24431 + "m" (opt),
24432 + "r" (__PCIBIOS_DS)
24433 : "memory");
24434 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24435 if (ret & 0xff00)
24436 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24437 {
24438 int ret;
24439
24440 - __asm__("lcall *(%%esi); cld\n\t"
24441 + __asm__("movw %w5, %%ds\n\t"
24442 + "lcall *%%ss:(%%esi); cld\n\t"
24443 + "push %%ss\n\t"
24444 + "pop %%ds\n"
24445 "jc 1f\n\t"
24446 "xor %%ah, %%ah\n"
24447 "1:"
24448 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24449 : "0" (PCIBIOS_SET_PCI_HW_INT),
24450 "b" ((dev->bus->number << 8) | dev->devfn),
24451 "c" ((irq << 8) | (pin + 10)),
24452 - "S" (&pci_indirect));
24453 + "S" (&pci_indirect),
24454 + "r" (__PCIBIOS_DS));
24455 return !(ret & 0xff00);
24456 }
24457 EXPORT_SYMBOL(pcibios_set_irq_routing);
24458 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24459 index 40e4469..1ab536e 100644
24460 --- a/arch/x86/platform/efi/efi_32.c
24461 +++ b/arch/x86/platform/efi/efi_32.c
24462 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24463 {
24464 struct desc_ptr gdt_descr;
24465
24466 +#ifdef CONFIG_PAX_KERNEXEC
24467 + struct desc_struct d;
24468 +#endif
24469 +
24470 local_irq_save(efi_rt_eflags);
24471
24472 load_cr3(initial_page_table);
24473 __flush_tlb_all();
24474
24475 +#ifdef CONFIG_PAX_KERNEXEC
24476 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24477 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24478 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24479 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24480 +#endif
24481 +
24482 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24483 gdt_descr.size = GDT_SIZE - 1;
24484 load_gdt(&gdt_descr);
24485 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24486 {
24487 struct desc_ptr gdt_descr;
24488
24489 +#ifdef CONFIG_PAX_KERNEXEC
24490 + struct desc_struct d;
24491 +
24492 + memset(&d, 0, sizeof d);
24493 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24494 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24495 +#endif
24496 +
24497 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24498 gdt_descr.size = GDT_SIZE - 1;
24499 load_gdt(&gdt_descr);
24500 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24501 index fbe66e6..c5c0dd2 100644
24502 --- a/arch/x86/platform/efi/efi_stub_32.S
24503 +++ b/arch/x86/platform/efi/efi_stub_32.S
24504 @@ -6,7 +6,9 @@
24505 */
24506
24507 #include <linux/linkage.h>
24508 +#include <linux/init.h>
24509 #include <asm/page_types.h>
24510 +#include <asm/segment.h>
24511
24512 /*
24513 * efi_call_phys(void *, ...) is a function with variable parameters.
24514 @@ -20,7 +22,7 @@
24515 * service functions will comply with gcc calling convention, too.
24516 */
24517
24518 -.text
24519 +__INIT
24520 ENTRY(efi_call_phys)
24521 /*
24522 * 0. The function can only be called in Linux kernel. So CS has been
24523 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24524 * The mapping of lower virtual memory has been created in prelog and
24525 * epilog.
24526 */
24527 - movl $1f, %edx
24528 - subl $__PAGE_OFFSET, %edx
24529 - jmp *%edx
24530 + movl $(__KERNEXEC_EFI_DS), %edx
24531 + mov %edx, %ds
24532 + mov %edx, %es
24533 + mov %edx, %ss
24534 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24535 1:
24536
24537 /*
24538 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24539 * parameter 2, ..., param n. To make things easy, we save the return
24540 * address of efi_call_phys in a global variable.
24541 */
24542 - popl %edx
24543 - movl %edx, saved_return_addr
24544 - /* get the function pointer into ECX*/
24545 - popl %ecx
24546 - movl %ecx, efi_rt_function_ptr
24547 - movl $2f, %edx
24548 - subl $__PAGE_OFFSET, %edx
24549 - pushl %edx
24550 + popl (saved_return_addr)
24551 + popl (efi_rt_function_ptr)
24552
24553 /*
24554 * 3. Clear PG bit in %CR0.
24555 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24556 /*
24557 * 5. Call the physical function.
24558 */
24559 - jmp *%ecx
24560 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24561
24562 -2:
24563 /*
24564 * 6. After EFI runtime service returns, control will return to
24565 * following instruction. We'd better readjust stack pointer first.
24566 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24567 movl %cr0, %edx
24568 orl $0x80000000, %edx
24569 movl %edx, %cr0
24570 - jmp 1f
24571 -1:
24572 +
24573 /*
24574 * 8. Now restore the virtual mode from flat mode by
24575 * adding EIP with PAGE_OFFSET.
24576 */
24577 - movl $1f, %edx
24578 - jmp *%edx
24579 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24580 1:
24581 + movl $(__KERNEL_DS), %edx
24582 + mov %edx, %ds
24583 + mov %edx, %es
24584 + mov %edx, %ss
24585
24586 /*
24587 * 9. Balance the stack. And because EAX contain the return value,
24588 * we'd better not clobber it.
24589 */
24590 - leal efi_rt_function_ptr, %edx
24591 - movl (%edx), %ecx
24592 - pushl %ecx
24593 + pushl (efi_rt_function_ptr)
24594
24595 /*
24596 - * 10. Push the saved return address onto the stack and return.
24597 + * 10. Return to the saved return address.
24598 */
24599 - leal saved_return_addr, %edx
24600 - movl (%edx), %ecx
24601 - pushl %ecx
24602 - ret
24603 + jmpl *(saved_return_addr)
24604 ENDPROC(efi_call_phys)
24605 .previous
24606
24607 -.data
24608 +__INITDATA
24609 saved_return_addr:
24610 .long 0
24611 efi_rt_function_ptr:
24612 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24613 index 4c07cca..2c8427d 100644
24614 --- a/arch/x86/platform/efi/efi_stub_64.S
24615 +++ b/arch/x86/platform/efi/efi_stub_64.S
24616 @@ -7,6 +7,7 @@
24617 */
24618
24619 #include <linux/linkage.h>
24620 +#include <asm/alternative-asm.h>
24621
24622 #define SAVE_XMM \
24623 mov %rsp, %rax; \
24624 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24625 call *%rdi
24626 addq $32, %rsp
24627 RESTORE_XMM
24628 + pax_force_retaddr 0, 1
24629 ret
24630 ENDPROC(efi_call0)
24631
24632 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24633 call *%rdi
24634 addq $32, %rsp
24635 RESTORE_XMM
24636 + pax_force_retaddr 0, 1
24637 ret
24638 ENDPROC(efi_call1)
24639
24640 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24641 call *%rdi
24642 addq $32, %rsp
24643 RESTORE_XMM
24644 + pax_force_retaddr 0, 1
24645 ret
24646 ENDPROC(efi_call2)
24647
24648 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24649 call *%rdi
24650 addq $32, %rsp
24651 RESTORE_XMM
24652 + pax_force_retaddr 0, 1
24653 ret
24654 ENDPROC(efi_call3)
24655
24656 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24657 call *%rdi
24658 addq $32, %rsp
24659 RESTORE_XMM
24660 + pax_force_retaddr 0, 1
24661 ret
24662 ENDPROC(efi_call4)
24663
24664 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24665 call *%rdi
24666 addq $48, %rsp
24667 RESTORE_XMM
24668 + pax_force_retaddr 0, 1
24669 ret
24670 ENDPROC(efi_call5)
24671
24672 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24673 call *%rdi
24674 addq $48, %rsp
24675 RESTORE_XMM
24676 + pax_force_retaddr 0, 1
24677 ret
24678 ENDPROC(efi_call6)
24679 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24680 index ad4ec1c..686479e 100644
24681 --- a/arch/x86/platform/mrst/mrst.c
24682 +++ b/arch/x86/platform/mrst/mrst.c
24683 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24684 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24685 int sfi_mrtc_num;
24686
24687 -static void mrst_power_off(void)
24688 +static __noreturn void mrst_power_off(void)
24689 {
24690 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24691 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24692 + BUG();
24693 }
24694
24695 -static void mrst_reboot(void)
24696 +static __noreturn void mrst_reboot(void)
24697 {
24698 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24699 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24700 else
24701 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24702 + BUG();
24703 }
24704
24705 /* parse all the mtimer info to a static mtimer array */
24706 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24707 index f10c0af..3ec1f95 100644
24708 --- a/arch/x86/power/cpu.c
24709 +++ b/arch/x86/power/cpu.c
24710 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24711 static void fix_processor_context(void)
24712 {
24713 int cpu = smp_processor_id();
24714 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24715 + struct tss_struct *t = init_tss + cpu;
24716
24717 set_tss_desc(cpu, t); /*
24718 * This just modifies memory; should not be
24719 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24720 */
24721
24722 #ifdef CONFIG_X86_64
24723 + pax_open_kernel();
24724 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24725 + pax_close_kernel();
24726
24727 syscall_init(); /* This sets MSR_*STAR and related */
24728 #endif
24729 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24730 index 5d17950..2253fc9 100644
24731 --- a/arch/x86/vdso/Makefile
24732 +++ b/arch/x86/vdso/Makefile
24733 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24734 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24735 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24736
24737 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24738 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24739 GCOV_PROFILE := n
24740
24741 #
24742 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24743 index 468d591..8e80a0a 100644
24744 --- a/arch/x86/vdso/vdso32-setup.c
24745 +++ b/arch/x86/vdso/vdso32-setup.c
24746 @@ -25,6 +25,7 @@
24747 #include <asm/tlbflush.h>
24748 #include <asm/vdso.h>
24749 #include <asm/proto.h>
24750 +#include <asm/mman.h>
24751
24752 enum {
24753 VDSO_DISABLED = 0,
24754 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24755 void enable_sep_cpu(void)
24756 {
24757 int cpu = get_cpu();
24758 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24759 + struct tss_struct *tss = init_tss + cpu;
24760
24761 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24762 put_cpu();
24763 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24764 gate_vma.vm_start = FIXADDR_USER_START;
24765 gate_vma.vm_end = FIXADDR_USER_END;
24766 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24767 - gate_vma.vm_page_prot = __P101;
24768 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24769 /*
24770 * Make sure the vDSO gets into every core dump.
24771 * Dumping its contents makes post-mortem fully interpretable later
24772 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24773 if (compat)
24774 addr = VDSO_HIGH_BASE;
24775 else {
24776 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24777 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24778 if (IS_ERR_VALUE(addr)) {
24779 ret = addr;
24780 goto up_fail;
24781 }
24782 }
24783
24784 - current->mm->context.vdso = (void *)addr;
24785 + current->mm->context.vdso = addr;
24786
24787 if (compat_uses_vma || !compat) {
24788 /*
24789 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24790 }
24791
24792 current_thread_info()->sysenter_return =
24793 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24794 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24795
24796 up_fail:
24797 if (ret)
24798 - current->mm->context.vdso = NULL;
24799 + current->mm->context.vdso = 0;
24800
24801 up_write(&mm->mmap_sem);
24802
24803 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24804
24805 const char *arch_vma_name(struct vm_area_struct *vma)
24806 {
24807 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24808 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24809 return "[vdso]";
24810 +
24811 +#ifdef CONFIG_PAX_SEGMEXEC
24812 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24813 + return "[vdso]";
24814 +#endif
24815 +
24816 return NULL;
24817 }
24818
24819 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24820 * Check to see if the corresponding task was created in compat vdso
24821 * mode.
24822 */
24823 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24824 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24825 return &gate_vma;
24826 return NULL;
24827 }
24828 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24829 index 153407c..611cba9 100644
24830 --- a/arch/x86/vdso/vma.c
24831 +++ b/arch/x86/vdso/vma.c
24832 @@ -16,8 +16,6 @@
24833 #include <asm/vdso.h>
24834 #include <asm/page.h>
24835
24836 -unsigned int __read_mostly vdso_enabled = 1;
24837 -
24838 extern char vdso_start[], vdso_end[];
24839 extern unsigned short vdso_sync_cpuid;
24840
24841 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24842 * unaligned here as a result of stack start randomization.
24843 */
24844 addr = PAGE_ALIGN(addr);
24845 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24846
24847 return addr;
24848 }
24849 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24850 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24851 {
24852 struct mm_struct *mm = current->mm;
24853 - unsigned long addr;
24854 + unsigned long addr = 0;
24855 int ret;
24856
24857 - if (!vdso_enabled)
24858 - return 0;
24859 -
24860 down_write(&mm->mmap_sem);
24861 +
24862 +#ifdef CONFIG_PAX_RANDMMAP
24863 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24864 +#endif
24865 +
24866 addr = vdso_addr(mm->start_stack, vdso_size);
24867 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24868 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24869 if (IS_ERR_VALUE(addr)) {
24870 ret = addr;
24871 goto up_fail;
24872 }
24873
24874 - current->mm->context.vdso = (void *)addr;
24875 + mm->context.vdso = addr;
24876
24877 ret = install_special_mapping(mm, addr, vdso_size,
24878 VM_READ|VM_EXEC|
24879 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24880 VM_ALWAYSDUMP,
24881 vdso_pages);
24882 - if (ret) {
24883 - current->mm->context.vdso = NULL;
24884 - goto up_fail;
24885 - }
24886 +
24887 + if (ret)
24888 + mm->context.vdso = 0;
24889
24890 up_fail:
24891 up_write(&mm->mmap_sem);
24892 return ret;
24893 }
24894 -
24895 -static __init int vdso_setup(char *s)
24896 -{
24897 - vdso_enabled = simple_strtoul(s, NULL, 0);
24898 - return 0;
24899 -}
24900 -__setup("vdso=", vdso_setup);
24901 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24902 index 1f92865..c843b20 100644
24903 --- a/arch/x86/xen/enlighten.c
24904 +++ b/arch/x86/xen/enlighten.c
24905 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24906
24907 struct shared_info xen_dummy_shared_info;
24908
24909 -void *xen_initial_gdt;
24910 -
24911 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24912 __read_mostly int xen_have_vector_callback;
24913 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24914 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24915 #endif
24916 };
24917
24918 -static void xen_reboot(int reason)
24919 +static __noreturn void xen_reboot(int reason)
24920 {
24921 struct sched_shutdown r = { .reason = reason };
24922
24923 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24924 BUG();
24925 }
24926
24927 -static void xen_restart(char *msg)
24928 +static __noreturn void xen_restart(char *msg)
24929 {
24930 xen_reboot(SHUTDOWN_reboot);
24931 }
24932
24933 -static void xen_emergency_restart(void)
24934 +static __noreturn void xen_emergency_restart(void)
24935 {
24936 xen_reboot(SHUTDOWN_reboot);
24937 }
24938
24939 -static void xen_machine_halt(void)
24940 +static __noreturn void xen_machine_halt(void)
24941 {
24942 xen_reboot(SHUTDOWN_poweroff);
24943 }
24944 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24945 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24946
24947 /* Work out if we support NX */
24948 - x86_configure_nx();
24949 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24950 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24951 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24952 + unsigned l, h;
24953 +
24954 + __supported_pte_mask |= _PAGE_NX;
24955 + rdmsr(MSR_EFER, l, h);
24956 + l |= EFER_NX;
24957 + wrmsr(MSR_EFER, l, h);
24958 + }
24959 +#endif
24960
24961 xen_setup_features();
24962
24963 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24964
24965 machine_ops = xen_machine_ops;
24966
24967 - /*
24968 - * The only reliable way to retain the initial address of the
24969 - * percpu gdt_page is to remember it here, so we can go and
24970 - * mark it RW later, when the initial percpu area is freed.
24971 - */
24972 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24973 -
24974 xen_smp_init();
24975
24976 #ifdef CONFIG_ACPI_NUMA
24977 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24978 index 87f6673..e2555a6 100644
24979 --- a/arch/x86/xen/mmu.c
24980 +++ b/arch/x86/xen/mmu.c
24981 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24982 convert_pfn_mfn(init_level4_pgt);
24983 convert_pfn_mfn(level3_ident_pgt);
24984 convert_pfn_mfn(level3_kernel_pgt);
24985 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24986 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24987 + convert_pfn_mfn(level3_vmemmap_pgt);
24988
24989 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24990 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24991 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24992 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24993 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24994 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24995 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24996 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24997 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24998 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24999 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
25000 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
25001 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
25002
25003 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
25004 pv_mmu_ops.set_pud = xen_set_pud;
25005 #if PAGETABLE_LEVELS == 4
25006 pv_mmu_ops.set_pgd = xen_set_pgd;
25007 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
25008 #endif
25009
25010 /* This will work as long as patching hasn't happened yet
25011 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
25012 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
25013 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
25014 .set_pgd = xen_set_pgd_hyper,
25015 + .set_pgd_batched = xen_set_pgd_hyper,
25016
25017 .alloc_pud = xen_alloc_pmd_init,
25018 .release_pud = xen_release_pmd_init,
25019 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
25020 index 041d4fe..7666b7e 100644
25021 --- a/arch/x86/xen/smp.c
25022 +++ b/arch/x86/xen/smp.c
25023 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
25024 {
25025 BUG_ON(smp_processor_id() != 0);
25026 native_smp_prepare_boot_cpu();
25027 -
25028 - /* We've switched to the "real" per-cpu gdt, so make sure the
25029 - old memory can be recycled */
25030 - make_lowmem_page_readwrite(xen_initial_gdt);
25031 -
25032 xen_filter_cpu_maps();
25033 xen_setup_vcpu_info_placement();
25034 }
25035 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
25036 gdt = get_cpu_gdt_table(cpu);
25037
25038 ctxt->flags = VGCF_IN_KERNEL;
25039 - ctxt->user_regs.ds = __USER_DS;
25040 - ctxt->user_regs.es = __USER_DS;
25041 + ctxt->user_regs.ds = __KERNEL_DS;
25042 + ctxt->user_regs.es = __KERNEL_DS;
25043 ctxt->user_regs.ss = __KERNEL_DS;
25044 #ifdef CONFIG_X86_32
25045 ctxt->user_regs.fs = __KERNEL_PERCPU;
25046 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
25047 + savesegment(gs, ctxt->user_regs.gs);
25048 #else
25049 ctxt->gs_base_kernel = per_cpu_offset(cpu);
25050 #endif
25051 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
25052 int rc;
25053
25054 per_cpu(current_task, cpu) = idle;
25055 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
25056 #ifdef CONFIG_X86_32
25057 irq_ctx_init(cpu);
25058 #else
25059 clear_tsk_thread_flag(idle, TIF_FORK);
25060 - per_cpu(kernel_stack, cpu) =
25061 - (unsigned long)task_stack_page(idle) -
25062 - KERNEL_STACK_OFFSET + THREAD_SIZE;
25063 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25064 #endif
25065 xen_setup_runstate_info(cpu);
25066 xen_setup_timer(cpu);
25067 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
25068 index b040b0e..8cc4fe0 100644
25069 --- a/arch/x86/xen/xen-asm_32.S
25070 +++ b/arch/x86/xen/xen-asm_32.S
25071 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
25072 ESP_OFFSET=4 # bytes pushed onto stack
25073
25074 /*
25075 - * Store vcpu_info pointer for easy access. Do it this way to
25076 - * avoid having to reload %fs
25077 + * Store vcpu_info pointer for easy access.
25078 */
25079 #ifdef CONFIG_SMP
25080 - GET_THREAD_INFO(%eax)
25081 - movl TI_cpu(%eax), %eax
25082 - movl __per_cpu_offset(,%eax,4), %eax
25083 - mov xen_vcpu(%eax), %eax
25084 + push %fs
25085 + mov $(__KERNEL_PERCPU), %eax
25086 + mov %eax, %fs
25087 + mov PER_CPU_VAR(xen_vcpu), %eax
25088 + pop %fs
25089 #else
25090 movl xen_vcpu, %eax
25091 #endif
25092 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
25093 index aaa7291..3f77960 100644
25094 --- a/arch/x86/xen/xen-head.S
25095 +++ b/arch/x86/xen/xen-head.S
25096 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
25097 #ifdef CONFIG_X86_32
25098 mov %esi,xen_start_info
25099 mov $init_thread_union+THREAD_SIZE,%esp
25100 +#ifdef CONFIG_SMP
25101 + movl $cpu_gdt_table,%edi
25102 + movl $__per_cpu_load,%eax
25103 + movw %ax,__KERNEL_PERCPU + 2(%edi)
25104 + rorl $16,%eax
25105 + movb %al,__KERNEL_PERCPU + 4(%edi)
25106 + movb %ah,__KERNEL_PERCPU + 7(%edi)
25107 + movl $__per_cpu_end - 1,%eax
25108 + subl $__per_cpu_start,%eax
25109 + movw %ax,__KERNEL_PERCPU + 0(%edi)
25110 +#endif
25111 #else
25112 mov %rsi,xen_start_info
25113 mov $init_thread_union+THREAD_SIZE,%rsp
25114 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
25115 index b095739..8c17bcd 100644
25116 --- a/arch/x86/xen/xen-ops.h
25117 +++ b/arch/x86/xen/xen-ops.h
25118 @@ -10,8 +10,6 @@
25119 extern const char xen_hypervisor_callback[];
25120 extern const char xen_failsafe_callback[];
25121
25122 -extern void *xen_initial_gdt;
25123 -
25124 struct trap_info;
25125 void xen_copy_trap_info(struct trap_info *traps);
25126
25127 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
25128 index 58916af..9cb880b 100644
25129 --- a/block/blk-iopoll.c
25130 +++ b/block/blk-iopoll.c
25131 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
25132 }
25133 EXPORT_SYMBOL(blk_iopoll_complete);
25134
25135 -static void blk_iopoll_softirq(struct softirq_action *h)
25136 +static void blk_iopoll_softirq(void)
25137 {
25138 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
25139 int rearm = 0, budget = blk_iopoll_budget;
25140 diff --git a/block/blk-map.c b/block/blk-map.c
25141 index 623e1cd..ca1e109 100644
25142 --- a/block/blk-map.c
25143 +++ b/block/blk-map.c
25144 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
25145 if (!len || !kbuf)
25146 return -EINVAL;
25147
25148 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
25149 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
25150 if (do_copy)
25151 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
25152 else
25153 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
25154 index 1366a89..e17f54b 100644
25155 --- a/block/blk-softirq.c
25156 +++ b/block/blk-softirq.c
25157 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
25158 * Softirq action handler - move entries to local list and loop over them
25159 * while passing them to the queue registered handler.
25160 */
25161 -static void blk_done_softirq(struct softirq_action *h)
25162 +static void blk_done_softirq(void)
25163 {
25164 struct list_head *cpu_list, local_list;
25165
25166 diff --git a/block/bsg.c b/block/bsg.c
25167 index 702f131..37808bf 100644
25168 --- a/block/bsg.c
25169 +++ b/block/bsg.c
25170 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
25171 struct sg_io_v4 *hdr, struct bsg_device *bd,
25172 fmode_t has_write_perm)
25173 {
25174 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25175 + unsigned char *cmdptr;
25176 +
25177 if (hdr->request_len > BLK_MAX_CDB) {
25178 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
25179 if (!rq->cmd)
25180 return -ENOMEM;
25181 - }
25182 + cmdptr = rq->cmd;
25183 + } else
25184 + cmdptr = tmpcmd;
25185
25186 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
25187 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
25188 hdr->request_len))
25189 return -EFAULT;
25190
25191 + if (cmdptr != rq->cmd)
25192 + memcpy(rq->cmd, cmdptr, hdr->request_len);
25193 +
25194 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
25195 if (blk_verify_command(rq->cmd, has_write_perm))
25196 return -EPERM;
25197 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
25198 index 7b72502..646105c 100644
25199 --- a/block/compat_ioctl.c
25200 +++ b/block/compat_ioctl.c
25201 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
25202 err |= __get_user(f->spec1, &uf->spec1);
25203 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
25204 err |= __get_user(name, &uf->name);
25205 - f->name = compat_ptr(name);
25206 + f->name = (void __force_kernel *)compat_ptr(name);
25207 if (err) {
25208 err = -EFAULT;
25209 goto out;
25210 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
25211 index 688be8a..8a37d98 100644
25212 --- a/block/scsi_ioctl.c
25213 +++ b/block/scsi_ioctl.c
25214 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
25215 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
25216 struct sg_io_hdr *hdr, fmode_t mode)
25217 {
25218 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
25219 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25220 + unsigned char *cmdptr;
25221 +
25222 + if (rq->cmd != rq->__cmd)
25223 + cmdptr = rq->cmd;
25224 + else
25225 + cmdptr = tmpcmd;
25226 +
25227 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
25228 return -EFAULT;
25229 +
25230 + if (cmdptr != rq->cmd)
25231 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
25232 +
25233 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
25234 return -EPERM;
25235
25236 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25237 int err;
25238 unsigned int in_len, out_len, bytes, opcode, cmdlen;
25239 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
25240 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25241 + unsigned char *cmdptr;
25242
25243 if (!sic)
25244 return -EINVAL;
25245 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25246 */
25247 err = -EFAULT;
25248 rq->cmd_len = cmdlen;
25249 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
25250 +
25251 + if (rq->cmd != rq->__cmd)
25252 + cmdptr = rq->cmd;
25253 + else
25254 + cmdptr = tmpcmd;
25255 +
25256 + if (copy_from_user(cmdptr, sic->data, cmdlen))
25257 goto error;
25258
25259 + if (rq->cmd != cmdptr)
25260 + memcpy(rq->cmd, cmdptr, cmdlen);
25261 +
25262 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25263 goto error;
25264
25265 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25266 index 671d4d6..5f24030 100644
25267 --- a/crypto/cryptd.c
25268 +++ b/crypto/cryptd.c
25269 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25270
25271 struct cryptd_blkcipher_request_ctx {
25272 crypto_completion_t complete;
25273 -};
25274 +} __no_const;
25275
25276 struct cryptd_hash_ctx {
25277 struct crypto_shash *child;
25278 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25279
25280 struct cryptd_aead_request_ctx {
25281 crypto_completion_t complete;
25282 -};
25283 +} __no_const;
25284
25285 static void cryptd_queue_worker(struct work_struct *work);
25286
25287 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25288 index 5d41894..22021e4 100644
25289 --- a/drivers/acpi/apei/cper.c
25290 +++ b/drivers/acpi/apei/cper.c
25291 @@ -38,12 +38,12 @@
25292 */
25293 u64 cper_next_record_id(void)
25294 {
25295 - static atomic64_t seq;
25296 + static atomic64_unchecked_t seq;
25297
25298 - if (!atomic64_read(&seq))
25299 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25300 + if (!atomic64_read_unchecked(&seq))
25301 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25302
25303 - return atomic64_inc_return(&seq);
25304 + return atomic64_inc_return_unchecked(&seq);
25305 }
25306 EXPORT_SYMBOL_GPL(cper_next_record_id);
25307
25308 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25309 index 6c47ae9..abfdd63 100644
25310 --- a/drivers/acpi/ec_sys.c
25311 +++ b/drivers/acpi/ec_sys.c
25312 @@ -12,6 +12,7 @@
25313 #include <linux/acpi.h>
25314 #include <linux/debugfs.h>
25315 #include <linux/module.h>
25316 +#include <linux/uaccess.h>
25317 #include "internal.h"
25318
25319 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25320 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25321 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25322 */
25323 unsigned int size = EC_SPACE_SIZE;
25324 - u8 *data = (u8 *) buf;
25325 + u8 data;
25326 loff_t init_off = *off;
25327 int err = 0;
25328
25329 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25330 size = count;
25331
25332 while (size) {
25333 - err = ec_read(*off, &data[*off - init_off]);
25334 + err = ec_read(*off, &data);
25335 if (err)
25336 return err;
25337 + if (put_user(data, &buf[*off - init_off]))
25338 + return -EFAULT;
25339 *off += 1;
25340 size--;
25341 }
25342 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25343
25344 unsigned int size = count;
25345 loff_t init_off = *off;
25346 - u8 *data = (u8 *) buf;
25347 int err = 0;
25348
25349 if (*off >= EC_SPACE_SIZE)
25350 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25351 }
25352
25353 while (size) {
25354 - u8 byte_write = data[*off - init_off];
25355 + u8 byte_write;
25356 + if (get_user(byte_write, &buf[*off - init_off]))
25357 + return -EFAULT;
25358 err = ec_write(*off, byte_write);
25359 if (err)
25360 return err;
25361 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25362 index 251c7b62..000462d 100644
25363 --- a/drivers/acpi/proc.c
25364 +++ b/drivers/acpi/proc.c
25365 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25366 size_t count, loff_t * ppos)
25367 {
25368 struct list_head *node, *next;
25369 - char strbuf[5];
25370 - char str[5] = "";
25371 - unsigned int len = count;
25372 + char strbuf[5] = {0};
25373
25374 - if (len > 4)
25375 - len = 4;
25376 - if (len < 0)
25377 + if (count > 4)
25378 + count = 4;
25379 + if (copy_from_user(strbuf, buffer, count))
25380 return -EFAULT;
25381 -
25382 - if (copy_from_user(strbuf, buffer, len))
25383 - return -EFAULT;
25384 - strbuf[len] = '\0';
25385 - sscanf(strbuf, "%s", str);
25386 + strbuf[count] = '\0';
25387
25388 mutex_lock(&acpi_device_lock);
25389 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25390 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25391 if (!dev->wakeup.flags.valid)
25392 continue;
25393
25394 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25395 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25396 if (device_can_wakeup(&dev->dev)) {
25397 bool enable = !device_may_wakeup(&dev->dev);
25398 device_set_wakeup_enable(&dev->dev, enable);
25399 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25400 index 9d7bc9f..a6fc091 100644
25401 --- a/drivers/acpi/processor_driver.c
25402 +++ b/drivers/acpi/processor_driver.c
25403 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25404 return 0;
25405 #endif
25406
25407 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25408 + BUG_ON(pr->id >= nr_cpu_ids);
25409
25410 /*
25411 * Buggy BIOS check
25412 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25413 index c04ad68..0b99473 100644
25414 --- a/drivers/ata/libata-core.c
25415 +++ b/drivers/ata/libata-core.c
25416 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25417 struct ata_port *ap;
25418 unsigned int tag;
25419
25420 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25421 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25422 ap = qc->ap;
25423
25424 qc->flags = 0;
25425 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25426 struct ata_port *ap;
25427 struct ata_link *link;
25428
25429 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25430 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25431 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25432 ap = qc->ap;
25433 link = qc->dev->link;
25434 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25435 return;
25436
25437 spin_lock(&lock);
25438 + pax_open_kernel();
25439
25440 for (cur = ops->inherits; cur; cur = cur->inherits) {
25441 void **inherit = (void **)cur;
25442 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25443 if (IS_ERR(*pp))
25444 *pp = NULL;
25445
25446 - ops->inherits = NULL;
25447 + *(struct ata_port_operations **)&ops->inherits = NULL;
25448
25449 + pax_close_kernel();
25450 spin_unlock(&lock);
25451 }
25452
25453 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25454 index e8574bb..f9f6a72 100644
25455 --- a/drivers/ata/pata_arasan_cf.c
25456 +++ b/drivers/ata/pata_arasan_cf.c
25457 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25458 /* Handle platform specific quirks */
25459 if (pdata->quirk) {
25460 if (pdata->quirk & CF_BROKEN_PIO) {
25461 - ap->ops->set_piomode = NULL;
25462 + pax_open_kernel();
25463 + *(void **)&ap->ops->set_piomode = NULL;
25464 + pax_close_kernel();
25465 ap->pio_mask = 0;
25466 }
25467 if (pdata->quirk & CF_BROKEN_MWDMA)
25468 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25469 index f9b983a..887b9d8 100644
25470 --- a/drivers/atm/adummy.c
25471 +++ b/drivers/atm/adummy.c
25472 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25473 vcc->pop(vcc, skb);
25474 else
25475 dev_kfree_skb_any(skb);
25476 - atomic_inc(&vcc->stats->tx);
25477 + atomic_inc_unchecked(&vcc->stats->tx);
25478
25479 return 0;
25480 }
25481 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25482 index f8f41e0..1f987dd 100644
25483 --- a/drivers/atm/ambassador.c
25484 +++ b/drivers/atm/ambassador.c
25485 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25486 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25487
25488 // VC layer stats
25489 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25490 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25491
25492 // free the descriptor
25493 kfree (tx_descr);
25494 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25495 dump_skb ("<<<", vc, skb);
25496
25497 // VC layer stats
25498 - atomic_inc(&atm_vcc->stats->rx);
25499 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25500 __net_timestamp(skb);
25501 // end of our responsibility
25502 atm_vcc->push (atm_vcc, skb);
25503 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25504 } else {
25505 PRINTK (KERN_INFO, "dropped over-size frame");
25506 // should we count this?
25507 - atomic_inc(&atm_vcc->stats->rx_drop);
25508 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25509 }
25510
25511 } else {
25512 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25513 }
25514
25515 if (check_area (skb->data, skb->len)) {
25516 - atomic_inc(&atm_vcc->stats->tx_err);
25517 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25518 return -ENOMEM; // ?
25519 }
25520
25521 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25522 index b22d71c..d6e1049 100644
25523 --- a/drivers/atm/atmtcp.c
25524 +++ b/drivers/atm/atmtcp.c
25525 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25526 if (vcc->pop) vcc->pop(vcc,skb);
25527 else dev_kfree_skb(skb);
25528 if (dev_data) return 0;
25529 - atomic_inc(&vcc->stats->tx_err);
25530 + atomic_inc_unchecked(&vcc->stats->tx_err);
25531 return -ENOLINK;
25532 }
25533 size = skb->len+sizeof(struct atmtcp_hdr);
25534 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25535 if (!new_skb) {
25536 if (vcc->pop) vcc->pop(vcc,skb);
25537 else dev_kfree_skb(skb);
25538 - atomic_inc(&vcc->stats->tx_err);
25539 + atomic_inc_unchecked(&vcc->stats->tx_err);
25540 return -ENOBUFS;
25541 }
25542 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25543 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25544 if (vcc->pop) vcc->pop(vcc,skb);
25545 else dev_kfree_skb(skb);
25546 out_vcc->push(out_vcc,new_skb);
25547 - atomic_inc(&vcc->stats->tx);
25548 - atomic_inc(&out_vcc->stats->rx);
25549 + atomic_inc_unchecked(&vcc->stats->tx);
25550 + atomic_inc_unchecked(&out_vcc->stats->rx);
25551 return 0;
25552 }
25553
25554 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25555 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25556 read_unlock(&vcc_sklist_lock);
25557 if (!out_vcc) {
25558 - atomic_inc(&vcc->stats->tx_err);
25559 + atomic_inc_unchecked(&vcc->stats->tx_err);
25560 goto done;
25561 }
25562 skb_pull(skb,sizeof(struct atmtcp_hdr));
25563 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25564 __net_timestamp(new_skb);
25565 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25566 out_vcc->push(out_vcc,new_skb);
25567 - atomic_inc(&vcc->stats->tx);
25568 - atomic_inc(&out_vcc->stats->rx);
25569 + atomic_inc_unchecked(&vcc->stats->tx);
25570 + atomic_inc_unchecked(&out_vcc->stats->rx);
25571 done:
25572 if (vcc->pop) vcc->pop(vcc,skb);
25573 else dev_kfree_skb(skb);
25574 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25575 index 956e9ac..133516d 100644
25576 --- a/drivers/atm/eni.c
25577 +++ b/drivers/atm/eni.c
25578 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25579 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25580 vcc->dev->number);
25581 length = 0;
25582 - atomic_inc(&vcc->stats->rx_err);
25583 + atomic_inc_unchecked(&vcc->stats->rx_err);
25584 }
25585 else {
25586 length = ATM_CELL_SIZE-1; /* no HEC */
25587 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25588 size);
25589 }
25590 eff = length = 0;
25591 - atomic_inc(&vcc->stats->rx_err);
25592 + atomic_inc_unchecked(&vcc->stats->rx_err);
25593 }
25594 else {
25595 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25596 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25597 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25598 vcc->dev->number,vcc->vci,length,size << 2,descr);
25599 length = eff = 0;
25600 - atomic_inc(&vcc->stats->rx_err);
25601 + atomic_inc_unchecked(&vcc->stats->rx_err);
25602 }
25603 }
25604 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25605 @@ -771,7 +771,7 @@ rx_dequeued++;
25606 vcc->push(vcc,skb);
25607 pushed++;
25608 }
25609 - atomic_inc(&vcc->stats->rx);
25610 + atomic_inc_unchecked(&vcc->stats->rx);
25611 }
25612 wake_up(&eni_dev->rx_wait);
25613 }
25614 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25615 PCI_DMA_TODEVICE);
25616 if (vcc->pop) vcc->pop(vcc,skb);
25617 else dev_kfree_skb_irq(skb);
25618 - atomic_inc(&vcc->stats->tx);
25619 + atomic_inc_unchecked(&vcc->stats->tx);
25620 wake_up(&eni_dev->tx_wait);
25621 dma_complete++;
25622 }
25623 @@ -1569,7 +1569,7 @@ tx_complete++;
25624 /*--------------------------------- entries ---------------------------------*/
25625
25626
25627 -static const char *media_name[] __devinitdata = {
25628 +static const char *media_name[] __devinitconst = {
25629 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25630 "UTP", "05?", "06?", "07?", /* 4- 7 */
25631 "TAXI","09?", "10?", "11?", /* 8-11 */
25632 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25633 index 5072f8a..fa52520d 100644
25634 --- a/drivers/atm/firestream.c
25635 +++ b/drivers/atm/firestream.c
25636 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25637 }
25638 }
25639
25640 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25641 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25642
25643 fs_dprintk (FS_DEBUG_TXMEM, "i");
25644 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25645 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25646 #endif
25647 skb_put (skb, qe->p1 & 0xffff);
25648 ATM_SKB(skb)->vcc = atm_vcc;
25649 - atomic_inc(&atm_vcc->stats->rx);
25650 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25651 __net_timestamp(skb);
25652 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25653 atm_vcc->push (atm_vcc, skb);
25654 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25655 kfree (pe);
25656 }
25657 if (atm_vcc)
25658 - atomic_inc(&atm_vcc->stats->rx_drop);
25659 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25660 break;
25661 case 0x1f: /* Reassembly abort: no buffers. */
25662 /* Silently increment error counter. */
25663 if (atm_vcc)
25664 - atomic_inc(&atm_vcc->stats->rx_drop);
25665 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25666 break;
25667 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25668 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25669 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25670 index 361f5ae..7fc552d 100644
25671 --- a/drivers/atm/fore200e.c
25672 +++ b/drivers/atm/fore200e.c
25673 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25674 #endif
25675 /* check error condition */
25676 if (*entry->status & STATUS_ERROR)
25677 - atomic_inc(&vcc->stats->tx_err);
25678 + atomic_inc_unchecked(&vcc->stats->tx_err);
25679 else
25680 - atomic_inc(&vcc->stats->tx);
25681 + atomic_inc_unchecked(&vcc->stats->tx);
25682 }
25683 }
25684
25685 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25686 if (skb == NULL) {
25687 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25688
25689 - atomic_inc(&vcc->stats->rx_drop);
25690 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25691 return -ENOMEM;
25692 }
25693
25694 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25695
25696 dev_kfree_skb_any(skb);
25697
25698 - atomic_inc(&vcc->stats->rx_drop);
25699 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25700 return -ENOMEM;
25701 }
25702
25703 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25704
25705 vcc->push(vcc, skb);
25706 - atomic_inc(&vcc->stats->rx);
25707 + atomic_inc_unchecked(&vcc->stats->rx);
25708
25709 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25710
25711 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25712 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25713 fore200e->atm_dev->number,
25714 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25715 - atomic_inc(&vcc->stats->rx_err);
25716 + atomic_inc_unchecked(&vcc->stats->rx_err);
25717 }
25718 }
25719
25720 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25721 goto retry_here;
25722 }
25723
25724 - atomic_inc(&vcc->stats->tx_err);
25725 + atomic_inc_unchecked(&vcc->stats->tx_err);
25726
25727 fore200e->tx_sat++;
25728 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25729 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25730 index 9a51df4..f3bb5f8 100644
25731 --- a/drivers/atm/he.c
25732 +++ b/drivers/atm/he.c
25733 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25734
25735 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25736 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25737 - atomic_inc(&vcc->stats->rx_drop);
25738 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25739 goto return_host_buffers;
25740 }
25741
25742 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25743 RBRQ_LEN_ERR(he_dev->rbrq_head)
25744 ? "LEN_ERR" : "",
25745 vcc->vpi, vcc->vci);
25746 - atomic_inc(&vcc->stats->rx_err);
25747 + atomic_inc_unchecked(&vcc->stats->rx_err);
25748 goto return_host_buffers;
25749 }
25750
25751 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25752 vcc->push(vcc, skb);
25753 spin_lock(&he_dev->global_lock);
25754
25755 - atomic_inc(&vcc->stats->rx);
25756 + atomic_inc_unchecked(&vcc->stats->rx);
25757
25758 return_host_buffers:
25759 ++pdus_assembled;
25760 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25761 tpd->vcc->pop(tpd->vcc, tpd->skb);
25762 else
25763 dev_kfree_skb_any(tpd->skb);
25764 - atomic_inc(&tpd->vcc->stats->tx_err);
25765 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25766 }
25767 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25768 return;
25769 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25770 vcc->pop(vcc, skb);
25771 else
25772 dev_kfree_skb_any(skb);
25773 - atomic_inc(&vcc->stats->tx_err);
25774 + atomic_inc_unchecked(&vcc->stats->tx_err);
25775 return -EINVAL;
25776 }
25777
25778 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25779 vcc->pop(vcc, skb);
25780 else
25781 dev_kfree_skb_any(skb);
25782 - atomic_inc(&vcc->stats->tx_err);
25783 + atomic_inc_unchecked(&vcc->stats->tx_err);
25784 return -EINVAL;
25785 }
25786 #endif
25787 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25788 vcc->pop(vcc, skb);
25789 else
25790 dev_kfree_skb_any(skb);
25791 - atomic_inc(&vcc->stats->tx_err);
25792 + atomic_inc_unchecked(&vcc->stats->tx_err);
25793 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25794 return -ENOMEM;
25795 }
25796 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25797 vcc->pop(vcc, skb);
25798 else
25799 dev_kfree_skb_any(skb);
25800 - atomic_inc(&vcc->stats->tx_err);
25801 + atomic_inc_unchecked(&vcc->stats->tx_err);
25802 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25803 return -ENOMEM;
25804 }
25805 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25806 __enqueue_tpd(he_dev, tpd, cid);
25807 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25808
25809 - atomic_inc(&vcc->stats->tx);
25810 + atomic_inc_unchecked(&vcc->stats->tx);
25811
25812 return 0;
25813 }
25814 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25815 index b812103..e391a49 100644
25816 --- a/drivers/atm/horizon.c
25817 +++ b/drivers/atm/horizon.c
25818 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25819 {
25820 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25821 // VC layer stats
25822 - atomic_inc(&vcc->stats->rx);
25823 + atomic_inc_unchecked(&vcc->stats->rx);
25824 __net_timestamp(skb);
25825 // end of our responsibility
25826 vcc->push (vcc, skb);
25827 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25828 dev->tx_iovec = NULL;
25829
25830 // VC layer stats
25831 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25832 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25833
25834 // free the skb
25835 hrz_kfree_skb (skb);
25836 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25837 index 1c05212..c28e200 100644
25838 --- a/drivers/atm/idt77252.c
25839 +++ b/drivers/atm/idt77252.c
25840 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25841 else
25842 dev_kfree_skb(skb);
25843
25844 - atomic_inc(&vcc->stats->tx);
25845 + atomic_inc_unchecked(&vcc->stats->tx);
25846 }
25847
25848 atomic_dec(&scq->used);
25849 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25850 if ((sb = dev_alloc_skb(64)) == NULL) {
25851 printk("%s: Can't allocate buffers for aal0.\n",
25852 card->name);
25853 - atomic_add(i, &vcc->stats->rx_drop);
25854 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25855 break;
25856 }
25857 if (!atm_charge(vcc, sb->truesize)) {
25858 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25859 card->name);
25860 - atomic_add(i - 1, &vcc->stats->rx_drop);
25861 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25862 dev_kfree_skb(sb);
25863 break;
25864 }
25865 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25866 ATM_SKB(sb)->vcc = vcc;
25867 __net_timestamp(sb);
25868 vcc->push(vcc, sb);
25869 - atomic_inc(&vcc->stats->rx);
25870 + atomic_inc_unchecked(&vcc->stats->rx);
25871
25872 cell += ATM_CELL_PAYLOAD;
25873 }
25874 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25875 "(CDC: %08x)\n",
25876 card->name, len, rpp->len, readl(SAR_REG_CDC));
25877 recycle_rx_pool_skb(card, rpp);
25878 - atomic_inc(&vcc->stats->rx_err);
25879 + atomic_inc_unchecked(&vcc->stats->rx_err);
25880 return;
25881 }
25882 if (stat & SAR_RSQE_CRC) {
25883 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25884 recycle_rx_pool_skb(card, rpp);
25885 - atomic_inc(&vcc->stats->rx_err);
25886 + atomic_inc_unchecked(&vcc->stats->rx_err);
25887 return;
25888 }
25889 if (skb_queue_len(&rpp->queue) > 1) {
25890 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25891 RXPRINTK("%s: Can't alloc RX skb.\n",
25892 card->name);
25893 recycle_rx_pool_skb(card, rpp);
25894 - atomic_inc(&vcc->stats->rx_err);
25895 + atomic_inc_unchecked(&vcc->stats->rx_err);
25896 return;
25897 }
25898 if (!atm_charge(vcc, skb->truesize)) {
25899 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25900 __net_timestamp(skb);
25901
25902 vcc->push(vcc, skb);
25903 - atomic_inc(&vcc->stats->rx);
25904 + atomic_inc_unchecked(&vcc->stats->rx);
25905
25906 return;
25907 }
25908 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25909 __net_timestamp(skb);
25910
25911 vcc->push(vcc, skb);
25912 - atomic_inc(&vcc->stats->rx);
25913 + atomic_inc_unchecked(&vcc->stats->rx);
25914
25915 if (skb->truesize > SAR_FB_SIZE_3)
25916 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25917 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25918 if (vcc->qos.aal != ATM_AAL0) {
25919 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25920 card->name, vpi, vci);
25921 - atomic_inc(&vcc->stats->rx_drop);
25922 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25923 goto drop;
25924 }
25925
25926 if ((sb = dev_alloc_skb(64)) == NULL) {
25927 printk("%s: Can't allocate buffers for AAL0.\n",
25928 card->name);
25929 - atomic_inc(&vcc->stats->rx_err);
25930 + atomic_inc_unchecked(&vcc->stats->rx_err);
25931 goto drop;
25932 }
25933
25934 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25935 ATM_SKB(sb)->vcc = vcc;
25936 __net_timestamp(sb);
25937 vcc->push(vcc, sb);
25938 - atomic_inc(&vcc->stats->rx);
25939 + atomic_inc_unchecked(&vcc->stats->rx);
25940
25941 drop:
25942 skb_pull(queue, 64);
25943 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25944
25945 if (vc == NULL) {
25946 printk("%s: NULL connection in send().\n", card->name);
25947 - atomic_inc(&vcc->stats->tx_err);
25948 + atomic_inc_unchecked(&vcc->stats->tx_err);
25949 dev_kfree_skb(skb);
25950 return -EINVAL;
25951 }
25952 if (!test_bit(VCF_TX, &vc->flags)) {
25953 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25954 - atomic_inc(&vcc->stats->tx_err);
25955 + atomic_inc_unchecked(&vcc->stats->tx_err);
25956 dev_kfree_skb(skb);
25957 return -EINVAL;
25958 }
25959 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25960 break;
25961 default:
25962 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25963 - atomic_inc(&vcc->stats->tx_err);
25964 + atomic_inc_unchecked(&vcc->stats->tx_err);
25965 dev_kfree_skb(skb);
25966 return -EINVAL;
25967 }
25968
25969 if (skb_shinfo(skb)->nr_frags != 0) {
25970 printk("%s: No scatter-gather yet.\n", card->name);
25971 - atomic_inc(&vcc->stats->tx_err);
25972 + atomic_inc_unchecked(&vcc->stats->tx_err);
25973 dev_kfree_skb(skb);
25974 return -EINVAL;
25975 }
25976 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25977
25978 err = queue_skb(card, vc, skb, oam);
25979 if (err) {
25980 - atomic_inc(&vcc->stats->tx_err);
25981 + atomic_inc_unchecked(&vcc->stats->tx_err);
25982 dev_kfree_skb(skb);
25983 return err;
25984 }
25985 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25986 skb = dev_alloc_skb(64);
25987 if (!skb) {
25988 printk("%s: Out of memory in send_oam().\n", card->name);
25989 - atomic_inc(&vcc->stats->tx_err);
25990 + atomic_inc_unchecked(&vcc->stats->tx_err);
25991 return -ENOMEM;
25992 }
25993 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25994 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25995 index 3d0c2b0..45441fa 100644
25996 --- a/drivers/atm/iphase.c
25997 +++ b/drivers/atm/iphase.c
25998 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25999 status = (u_short) (buf_desc_ptr->desc_mode);
26000 if (status & (RX_CER | RX_PTE | RX_OFL))
26001 {
26002 - atomic_inc(&vcc->stats->rx_err);
26003 + atomic_inc_unchecked(&vcc->stats->rx_err);
26004 IF_ERR(printk("IA: bad packet, dropping it");)
26005 if (status & RX_CER) {
26006 IF_ERR(printk(" cause: packet CRC error\n");)
26007 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
26008 len = dma_addr - buf_addr;
26009 if (len > iadev->rx_buf_sz) {
26010 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26011 - atomic_inc(&vcc->stats->rx_err);
26012 + atomic_inc_unchecked(&vcc->stats->rx_err);
26013 goto out_free_desc;
26014 }
26015
26016 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26017 ia_vcc = INPH_IA_VCC(vcc);
26018 if (ia_vcc == NULL)
26019 {
26020 - atomic_inc(&vcc->stats->rx_err);
26021 + atomic_inc_unchecked(&vcc->stats->rx_err);
26022 dev_kfree_skb_any(skb);
26023 atm_return(vcc, atm_guess_pdu2truesize(len));
26024 goto INCR_DLE;
26025 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26026 if ((length > iadev->rx_buf_sz) || (length >
26027 (skb->len - sizeof(struct cpcs_trailer))))
26028 {
26029 - atomic_inc(&vcc->stats->rx_err);
26030 + atomic_inc_unchecked(&vcc->stats->rx_err);
26031 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26032 length, skb->len);)
26033 dev_kfree_skb_any(skb);
26034 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26035
26036 IF_RX(printk("rx_dle_intr: skb push");)
26037 vcc->push(vcc,skb);
26038 - atomic_inc(&vcc->stats->rx);
26039 + atomic_inc_unchecked(&vcc->stats->rx);
26040 iadev->rx_pkt_cnt++;
26041 }
26042 INCR_DLE:
26043 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
26044 {
26045 struct k_sonet_stats *stats;
26046 stats = &PRIV(_ia_dev[board])->sonet_stats;
26047 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26048 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26049 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26050 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26051 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26052 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26053 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26054 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26055 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26056 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26057 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26058 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26059 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26060 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26061 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26062 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26063 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26064 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26065 }
26066 ia_cmds.status = 0;
26067 break;
26068 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26069 if ((desc == 0) || (desc > iadev->num_tx_desc))
26070 {
26071 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26072 - atomic_inc(&vcc->stats->tx);
26073 + atomic_inc_unchecked(&vcc->stats->tx);
26074 if (vcc->pop)
26075 vcc->pop(vcc, skb);
26076 else
26077 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26078 ATM_DESC(skb) = vcc->vci;
26079 skb_queue_tail(&iadev->tx_dma_q, skb);
26080
26081 - atomic_inc(&vcc->stats->tx);
26082 + atomic_inc_unchecked(&vcc->stats->tx);
26083 iadev->tx_pkt_cnt++;
26084 /* Increment transaction counter */
26085 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26086
26087 #if 0
26088 /* add flow control logic */
26089 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26090 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26091 if (iavcc->vc_desc_cnt > 10) {
26092 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26093 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26094 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
26095 index f556969..0da15eb 100644
26096 --- a/drivers/atm/lanai.c
26097 +++ b/drivers/atm/lanai.c
26098 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
26099 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26100 lanai_endtx(lanai, lvcc);
26101 lanai_free_skb(lvcc->tx.atmvcc, skb);
26102 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26103 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26104 }
26105
26106 /* Try to fill the buffer - don't call unless there is backlog */
26107 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
26108 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26109 __net_timestamp(skb);
26110 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26111 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26112 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26113 out:
26114 lvcc->rx.buf.ptr = end;
26115 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26116 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26117 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26118 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26119 lanai->stats.service_rxnotaal5++;
26120 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26121 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26122 return 0;
26123 }
26124 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26125 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26126 int bytes;
26127 read_unlock(&vcc_sklist_lock);
26128 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26129 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26130 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26131 lvcc->stats.x.aal5.service_trash++;
26132 bytes = (SERVICE_GET_END(s) * 16) -
26133 (((unsigned long) lvcc->rx.buf.ptr) -
26134 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26135 }
26136 if (s & SERVICE_STREAM) {
26137 read_unlock(&vcc_sklist_lock);
26138 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26139 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26140 lvcc->stats.x.aal5.service_stream++;
26141 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26142 "PDU on VCI %d!\n", lanai->number, vci);
26143 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26144 return 0;
26145 }
26146 DPRINTK("got rx crc error on vci %d\n", vci);
26147 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26148 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26149 lvcc->stats.x.aal5.service_rxcrc++;
26150 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26151 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26152 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
26153 index 1c70c45..300718d 100644
26154 --- a/drivers/atm/nicstar.c
26155 +++ b/drivers/atm/nicstar.c
26156 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26157 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
26158 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
26159 card->index);
26160 - atomic_inc(&vcc->stats->tx_err);
26161 + atomic_inc_unchecked(&vcc->stats->tx_err);
26162 dev_kfree_skb_any(skb);
26163 return -EINVAL;
26164 }
26165 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26166 if (!vc->tx) {
26167 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
26168 card->index);
26169 - atomic_inc(&vcc->stats->tx_err);
26170 + atomic_inc_unchecked(&vcc->stats->tx_err);
26171 dev_kfree_skb_any(skb);
26172 return -EINVAL;
26173 }
26174 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26175 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
26176 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
26177 card->index);
26178 - atomic_inc(&vcc->stats->tx_err);
26179 + atomic_inc_unchecked(&vcc->stats->tx_err);
26180 dev_kfree_skb_any(skb);
26181 return -EINVAL;
26182 }
26183
26184 if (skb_shinfo(skb)->nr_frags != 0) {
26185 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26186 - atomic_inc(&vcc->stats->tx_err);
26187 + atomic_inc_unchecked(&vcc->stats->tx_err);
26188 dev_kfree_skb_any(skb);
26189 return -EINVAL;
26190 }
26191 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26192 }
26193
26194 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26195 - atomic_inc(&vcc->stats->tx_err);
26196 + atomic_inc_unchecked(&vcc->stats->tx_err);
26197 dev_kfree_skb_any(skb);
26198 return -EIO;
26199 }
26200 - atomic_inc(&vcc->stats->tx);
26201 + atomic_inc_unchecked(&vcc->stats->tx);
26202
26203 return 0;
26204 }
26205 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26206 printk
26207 ("nicstar%d: Can't allocate buffers for aal0.\n",
26208 card->index);
26209 - atomic_add(i, &vcc->stats->rx_drop);
26210 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26211 break;
26212 }
26213 if (!atm_charge(vcc, sb->truesize)) {
26214 RXPRINTK
26215 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26216 card->index);
26217 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26218 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26219 dev_kfree_skb_any(sb);
26220 break;
26221 }
26222 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26223 ATM_SKB(sb)->vcc = vcc;
26224 __net_timestamp(sb);
26225 vcc->push(vcc, sb);
26226 - atomic_inc(&vcc->stats->rx);
26227 + atomic_inc_unchecked(&vcc->stats->rx);
26228 cell += ATM_CELL_PAYLOAD;
26229 }
26230
26231 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26232 if (iovb == NULL) {
26233 printk("nicstar%d: Out of iovec buffers.\n",
26234 card->index);
26235 - atomic_inc(&vcc->stats->rx_drop);
26236 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26237 recycle_rx_buf(card, skb);
26238 return;
26239 }
26240 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26241 small or large buffer itself. */
26242 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26243 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26244 - atomic_inc(&vcc->stats->rx_err);
26245 + atomic_inc_unchecked(&vcc->stats->rx_err);
26246 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26247 NS_MAX_IOVECS);
26248 NS_PRV_IOVCNT(iovb) = 0;
26249 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26250 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26251 card->index);
26252 which_list(card, skb);
26253 - atomic_inc(&vcc->stats->rx_err);
26254 + atomic_inc_unchecked(&vcc->stats->rx_err);
26255 recycle_rx_buf(card, skb);
26256 vc->rx_iov = NULL;
26257 recycle_iov_buf(card, iovb);
26258 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26259 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26260 card->index);
26261 which_list(card, skb);
26262 - atomic_inc(&vcc->stats->rx_err);
26263 + atomic_inc_unchecked(&vcc->stats->rx_err);
26264 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26265 NS_PRV_IOVCNT(iovb));
26266 vc->rx_iov = NULL;
26267 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26268 printk(" - PDU size mismatch.\n");
26269 else
26270 printk(".\n");
26271 - atomic_inc(&vcc->stats->rx_err);
26272 + atomic_inc_unchecked(&vcc->stats->rx_err);
26273 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26274 NS_PRV_IOVCNT(iovb));
26275 vc->rx_iov = NULL;
26276 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26277 /* skb points to a small buffer */
26278 if (!atm_charge(vcc, skb->truesize)) {
26279 push_rxbufs(card, skb);
26280 - atomic_inc(&vcc->stats->rx_drop);
26281 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26282 } else {
26283 skb_put(skb, len);
26284 dequeue_sm_buf(card, skb);
26285 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26286 ATM_SKB(skb)->vcc = vcc;
26287 __net_timestamp(skb);
26288 vcc->push(vcc, skb);
26289 - atomic_inc(&vcc->stats->rx);
26290 + atomic_inc_unchecked(&vcc->stats->rx);
26291 }
26292 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26293 struct sk_buff *sb;
26294 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26295 if (len <= NS_SMBUFSIZE) {
26296 if (!atm_charge(vcc, sb->truesize)) {
26297 push_rxbufs(card, sb);
26298 - atomic_inc(&vcc->stats->rx_drop);
26299 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26300 } else {
26301 skb_put(sb, len);
26302 dequeue_sm_buf(card, sb);
26303 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26304 ATM_SKB(sb)->vcc = vcc;
26305 __net_timestamp(sb);
26306 vcc->push(vcc, sb);
26307 - atomic_inc(&vcc->stats->rx);
26308 + atomic_inc_unchecked(&vcc->stats->rx);
26309 }
26310
26311 push_rxbufs(card, skb);
26312 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26313
26314 if (!atm_charge(vcc, skb->truesize)) {
26315 push_rxbufs(card, skb);
26316 - atomic_inc(&vcc->stats->rx_drop);
26317 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26318 } else {
26319 dequeue_lg_buf(card, skb);
26320 #ifdef NS_USE_DESTRUCTORS
26321 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26322 ATM_SKB(skb)->vcc = vcc;
26323 __net_timestamp(skb);
26324 vcc->push(vcc, skb);
26325 - atomic_inc(&vcc->stats->rx);
26326 + atomic_inc_unchecked(&vcc->stats->rx);
26327 }
26328
26329 push_rxbufs(card, sb);
26330 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26331 printk
26332 ("nicstar%d: Out of huge buffers.\n",
26333 card->index);
26334 - atomic_inc(&vcc->stats->rx_drop);
26335 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26336 recycle_iovec_rx_bufs(card,
26337 (struct iovec *)
26338 iovb->data,
26339 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26340 card->hbpool.count++;
26341 } else
26342 dev_kfree_skb_any(hb);
26343 - atomic_inc(&vcc->stats->rx_drop);
26344 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26345 } else {
26346 /* Copy the small buffer to the huge buffer */
26347 sb = (struct sk_buff *)iov->iov_base;
26348 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26349 #endif /* NS_USE_DESTRUCTORS */
26350 __net_timestamp(hb);
26351 vcc->push(vcc, hb);
26352 - atomic_inc(&vcc->stats->rx);
26353 + atomic_inc_unchecked(&vcc->stats->rx);
26354 }
26355 }
26356
26357 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26358 index 5d1d076..12fbca4 100644
26359 --- a/drivers/atm/solos-pci.c
26360 +++ b/drivers/atm/solos-pci.c
26361 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26362 }
26363 atm_charge(vcc, skb->truesize);
26364 vcc->push(vcc, skb);
26365 - atomic_inc(&vcc->stats->rx);
26366 + atomic_inc_unchecked(&vcc->stats->rx);
26367 break;
26368
26369 case PKT_STATUS:
26370 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26371 vcc = SKB_CB(oldskb)->vcc;
26372
26373 if (vcc) {
26374 - atomic_inc(&vcc->stats->tx);
26375 + atomic_inc_unchecked(&vcc->stats->tx);
26376 solos_pop(vcc, oldskb);
26377 } else
26378 dev_kfree_skb_irq(oldskb);
26379 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26380 index 90f1ccc..04c4a1e 100644
26381 --- a/drivers/atm/suni.c
26382 +++ b/drivers/atm/suni.c
26383 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26384
26385
26386 #define ADD_LIMITED(s,v) \
26387 - atomic_add((v),&stats->s); \
26388 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26389 + atomic_add_unchecked((v),&stats->s); \
26390 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26391
26392
26393 static void suni_hz(unsigned long from_timer)
26394 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26395 index 5120a96..e2572bd 100644
26396 --- a/drivers/atm/uPD98402.c
26397 +++ b/drivers/atm/uPD98402.c
26398 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26399 struct sonet_stats tmp;
26400 int error = 0;
26401
26402 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26403 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26404 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26405 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26406 if (zero && !error) {
26407 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26408
26409
26410 #define ADD_LIMITED(s,v) \
26411 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26412 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26413 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26414 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26415 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26416 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26417
26418
26419 static void stat_event(struct atm_dev *dev)
26420 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26421 if (reason & uPD98402_INT_PFM) stat_event(dev);
26422 if (reason & uPD98402_INT_PCO) {
26423 (void) GET(PCOCR); /* clear interrupt cause */
26424 - atomic_add(GET(HECCT),
26425 + atomic_add_unchecked(GET(HECCT),
26426 &PRIV(dev)->sonet_stats.uncorr_hcs);
26427 }
26428 if ((reason & uPD98402_INT_RFO) &&
26429 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26430 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26431 uPD98402_INT_LOS),PIMR); /* enable them */
26432 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26433 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26434 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26435 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26436 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26437 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26438 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26439 return 0;
26440 }
26441
26442 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26443 index d889f56..17eb71e 100644
26444 --- a/drivers/atm/zatm.c
26445 +++ b/drivers/atm/zatm.c
26446 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26447 }
26448 if (!size) {
26449 dev_kfree_skb_irq(skb);
26450 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26451 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26452 continue;
26453 }
26454 if (!atm_charge(vcc,skb->truesize)) {
26455 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26456 skb->len = size;
26457 ATM_SKB(skb)->vcc = vcc;
26458 vcc->push(vcc,skb);
26459 - atomic_inc(&vcc->stats->rx);
26460 + atomic_inc_unchecked(&vcc->stats->rx);
26461 }
26462 zout(pos & 0xffff,MTA(mbx));
26463 #if 0 /* probably a stupid idea */
26464 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26465 skb_queue_head(&zatm_vcc->backlog,skb);
26466 break;
26467 }
26468 - atomic_inc(&vcc->stats->tx);
26469 + atomic_inc_unchecked(&vcc->stats->tx);
26470 wake_up(&zatm_vcc->tx_wait);
26471 }
26472
26473 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26474 index a4760e0..51283cf 100644
26475 --- a/drivers/base/devtmpfs.c
26476 +++ b/drivers/base/devtmpfs.c
26477 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26478 if (!thread)
26479 return 0;
26480
26481 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26482 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26483 if (err)
26484 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26485 else
26486 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26487 index caf995f..6f76697 100644
26488 --- a/drivers/base/power/wakeup.c
26489 +++ b/drivers/base/power/wakeup.c
26490 @@ -30,14 +30,14 @@ bool events_check_enabled;
26491 * They need to be modified together atomically, so it's better to use one
26492 * atomic variable to hold them both.
26493 */
26494 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26495 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26496
26497 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26498 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26499
26500 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26501 {
26502 - unsigned int comb = atomic_read(&combined_event_count);
26503 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26504
26505 *cnt = (comb >> IN_PROGRESS_BITS);
26506 *inpr = comb & MAX_IN_PROGRESS;
26507 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26508 ws->last_time = ktime_get();
26509
26510 /* Increment the counter of events in progress. */
26511 - atomic_inc(&combined_event_count);
26512 + atomic_inc_unchecked(&combined_event_count);
26513 }
26514
26515 /**
26516 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26517 * Increment the counter of registered wakeup events and decrement the
26518 * couter of wakeup events in progress simultaneously.
26519 */
26520 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26521 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26522 }
26523
26524 /**
26525 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26526 index b0f553b..77b928b 100644
26527 --- a/drivers/block/cciss.c
26528 +++ b/drivers/block/cciss.c
26529 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26530 int err;
26531 u32 cp;
26532
26533 + memset(&arg64, 0, sizeof(arg64));
26534 +
26535 err = 0;
26536 err |=
26537 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26538 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26539 while (!list_empty(&h->reqQ)) {
26540 c = list_entry(h->reqQ.next, CommandList_struct, list);
26541 /* can't do anything if fifo is full */
26542 - if ((h->access.fifo_full(h))) {
26543 + if ((h->access->fifo_full(h))) {
26544 dev_warn(&h->pdev->dev, "fifo full\n");
26545 break;
26546 }
26547 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26548 h->Qdepth--;
26549
26550 /* Tell the controller execute command */
26551 - h->access.submit_command(h, c);
26552 + h->access->submit_command(h, c);
26553
26554 /* Put job onto the completed Q */
26555 addQ(&h->cmpQ, c);
26556 @@ -3443,17 +3445,17 @@ startio:
26557
26558 static inline unsigned long get_next_completion(ctlr_info_t *h)
26559 {
26560 - return h->access.command_completed(h);
26561 + return h->access->command_completed(h);
26562 }
26563
26564 static inline int interrupt_pending(ctlr_info_t *h)
26565 {
26566 - return h->access.intr_pending(h);
26567 + return h->access->intr_pending(h);
26568 }
26569
26570 static inline long interrupt_not_for_us(ctlr_info_t *h)
26571 {
26572 - return ((h->access.intr_pending(h) == 0) ||
26573 + return ((h->access->intr_pending(h) == 0) ||
26574 (h->interrupts_enabled == 0));
26575 }
26576
26577 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26578 u32 a;
26579
26580 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26581 - return h->access.command_completed(h);
26582 + return h->access->command_completed(h);
26583
26584 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26585 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26586 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26587 trans_support & CFGTBL_Trans_use_short_tags);
26588
26589 /* Change the access methods to the performant access methods */
26590 - h->access = SA5_performant_access;
26591 + h->access = &SA5_performant_access;
26592 h->transMethod = CFGTBL_Trans_Performant;
26593
26594 return;
26595 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26596 if (prod_index < 0)
26597 return -ENODEV;
26598 h->product_name = products[prod_index].product_name;
26599 - h->access = *(products[prod_index].access);
26600 + h->access = products[prod_index].access;
26601
26602 if (cciss_board_disabled(h)) {
26603 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26604 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26605 }
26606
26607 /* make sure the board interrupts are off */
26608 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26609 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26610 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26611 if (rc)
26612 goto clean2;
26613 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26614 * fake ones to scoop up any residual completions.
26615 */
26616 spin_lock_irqsave(&h->lock, flags);
26617 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26618 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26619 spin_unlock_irqrestore(&h->lock, flags);
26620 free_irq(h->intr[h->intr_mode], h);
26621 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26622 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26623 dev_info(&h->pdev->dev, "Board READY.\n");
26624 dev_info(&h->pdev->dev,
26625 "Waiting for stale completions to drain.\n");
26626 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26627 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26628 msleep(10000);
26629 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26630 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26631
26632 rc = controller_reset_failed(h->cfgtable);
26633 if (rc)
26634 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26635 cciss_scsi_setup(h);
26636
26637 /* Turn the interrupts on so we can service requests */
26638 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26639 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26640
26641 /* Get the firmware version */
26642 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26643 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26644 kfree(flush_buf);
26645 if (return_code != IO_OK)
26646 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26647 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26648 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26649 free_irq(h->intr[h->intr_mode], h);
26650 }
26651
26652 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26653 index 7fda30e..eb5dfe0 100644
26654 --- a/drivers/block/cciss.h
26655 +++ b/drivers/block/cciss.h
26656 @@ -101,7 +101,7 @@ struct ctlr_info
26657 /* information about each logical volume */
26658 drive_info_struct *drv[CISS_MAX_LUN];
26659
26660 - struct access_method access;
26661 + struct access_method *access;
26662
26663 /* queue and queue Info */
26664 struct list_head reqQ;
26665 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26666 index 9125bbe..eede5c8 100644
26667 --- a/drivers/block/cpqarray.c
26668 +++ b/drivers/block/cpqarray.c
26669 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26670 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26671 goto Enomem4;
26672 }
26673 - hba[i]->access.set_intr_mask(hba[i], 0);
26674 + hba[i]->access->set_intr_mask(hba[i], 0);
26675 if (request_irq(hba[i]->intr, do_ida_intr,
26676 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26677 {
26678 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26679 add_timer(&hba[i]->timer);
26680
26681 /* Enable IRQ now that spinlock and rate limit timer are set up */
26682 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26683 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26684
26685 for(j=0; j<NWD; j++) {
26686 struct gendisk *disk = ida_gendisk[i][j];
26687 @@ -694,7 +694,7 @@ DBGINFO(
26688 for(i=0; i<NR_PRODUCTS; i++) {
26689 if (board_id == products[i].board_id) {
26690 c->product_name = products[i].product_name;
26691 - c->access = *(products[i].access);
26692 + c->access = products[i].access;
26693 break;
26694 }
26695 }
26696 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26697 hba[ctlr]->intr = intr;
26698 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26699 hba[ctlr]->product_name = products[j].product_name;
26700 - hba[ctlr]->access = *(products[j].access);
26701 + hba[ctlr]->access = products[j].access;
26702 hba[ctlr]->ctlr = ctlr;
26703 hba[ctlr]->board_id = board_id;
26704 hba[ctlr]->pci_dev = NULL; /* not PCI */
26705 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26706
26707 while((c = h->reqQ) != NULL) {
26708 /* Can't do anything if we're busy */
26709 - if (h->access.fifo_full(h) == 0)
26710 + if (h->access->fifo_full(h) == 0)
26711 return;
26712
26713 /* Get the first entry from the request Q */
26714 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26715 h->Qdepth--;
26716
26717 /* Tell the controller to do our bidding */
26718 - h->access.submit_command(h, c);
26719 + h->access->submit_command(h, c);
26720
26721 /* Get onto the completion Q */
26722 addQ(&h->cmpQ, c);
26723 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26724 unsigned long flags;
26725 __u32 a,a1;
26726
26727 - istat = h->access.intr_pending(h);
26728 + istat = h->access->intr_pending(h);
26729 /* Is this interrupt for us? */
26730 if (istat == 0)
26731 return IRQ_NONE;
26732 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26733 */
26734 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26735 if (istat & FIFO_NOT_EMPTY) {
26736 - while((a = h->access.command_completed(h))) {
26737 + while((a = h->access->command_completed(h))) {
26738 a1 = a; a &= ~3;
26739 if ((c = h->cmpQ) == NULL)
26740 {
26741 @@ -1449,11 +1449,11 @@ static int sendcmd(
26742 /*
26743 * Disable interrupt
26744 */
26745 - info_p->access.set_intr_mask(info_p, 0);
26746 + info_p->access->set_intr_mask(info_p, 0);
26747 /* Make sure there is room in the command FIFO */
26748 /* Actually it should be completely empty at this time. */
26749 for (i = 200000; i > 0; i--) {
26750 - temp = info_p->access.fifo_full(info_p);
26751 + temp = info_p->access->fifo_full(info_p);
26752 if (temp != 0) {
26753 break;
26754 }
26755 @@ -1466,7 +1466,7 @@ DBG(
26756 /*
26757 * Send the cmd
26758 */
26759 - info_p->access.submit_command(info_p, c);
26760 + info_p->access->submit_command(info_p, c);
26761 complete = pollcomplete(ctlr);
26762
26763 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26764 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26765 * we check the new geometry. Then turn interrupts back on when
26766 * we're done.
26767 */
26768 - host->access.set_intr_mask(host, 0);
26769 + host->access->set_intr_mask(host, 0);
26770 getgeometry(ctlr);
26771 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26772 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26773
26774 for(i=0; i<NWD; i++) {
26775 struct gendisk *disk = ida_gendisk[ctlr][i];
26776 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26777 /* Wait (up to 2 seconds) for a command to complete */
26778
26779 for (i = 200000; i > 0; i--) {
26780 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26781 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26782 if (done == 0) {
26783 udelay(10); /* a short fixed delay */
26784 } else
26785 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26786 index be73e9d..7fbf140 100644
26787 --- a/drivers/block/cpqarray.h
26788 +++ b/drivers/block/cpqarray.h
26789 @@ -99,7 +99,7 @@ struct ctlr_info {
26790 drv_info_t drv[NWD];
26791 struct proc_dir_entry *proc;
26792
26793 - struct access_method access;
26794 + struct access_method *access;
26795
26796 cmdlist_t *reqQ;
26797 cmdlist_t *cmpQ;
26798 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26799 index 9cf2035..bffca95 100644
26800 --- a/drivers/block/drbd/drbd_int.h
26801 +++ b/drivers/block/drbd/drbd_int.h
26802 @@ -736,7 +736,7 @@ struct drbd_request;
26803 struct drbd_epoch {
26804 struct list_head list;
26805 unsigned int barrier_nr;
26806 - atomic_t epoch_size; /* increased on every request added. */
26807 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26808 atomic_t active; /* increased on every req. added, and dec on every finished. */
26809 unsigned long flags;
26810 };
26811 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26812 void *int_dig_in;
26813 void *int_dig_vv;
26814 wait_queue_head_t seq_wait;
26815 - atomic_t packet_seq;
26816 + atomic_unchecked_t packet_seq;
26817 unsigned int peer_seq;
26818 spinlock_t peer_seq_lock;
26819 unsigned int minor;
26820 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26821
26822 static inline void drbd_tcp_cork(struct socket *sock)
26823 {
26824 - int __user val = 1;
26825 + int val = 1;
26826 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26827 - (char __user *)&val, sizeof(val));
26828 + (char __force_user *)&val, sizeof(val));
26829 }
26830
26831 static inline void drbd_tcp_uncork(struct socket *sock)
26832 {
26833 - int __user val = 0;
26834 + int val = 0;
26835 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26836 - (char __user *)&val, sizeof(val));
26837 + (char __force_user *)&val, sizeof(val));
26838 }
26839
26840 static inline void drbd_tcp_nodelay(struct socket *sock)
26841 {
26842 - int __user val = 1;
26843 + int val = 1;
26844 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26845 - (char __user *)&val, sizeof(val));
26846 + (char __force_user *)&val, sizeof(val));
26847 }
26848
26849 static inline void drbd_tcp_quickack(struct socket *sock)
26850 {
26851 - int __user val = 2;
26852 + int val = 2;
26853 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26854 - (char __user *)&val, sizeof(val));
26855 + (char __force_user *)&val, sizeof(val));
26856 }
26857
26858 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26859 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26860 index 0358e55..bc33689 100644
26861 --- a/drivers/block/drbd/drbd_main.c
26862 +++ b/drivers/block/drbd/drbd_main.c
26863 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26864 p.sector = sector;
26865 p.block_id = block_id;
26866 p.blksize = blksize;
26867 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26868 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26869
26870 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26871 return false;
26872 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26873 p.sector = cpu_to_be64(req->sector);
26874 p.block_id = (unsigned long)req;
26875 p.seq_num = cpu_to_be32(req->seq_num =
26876 - atomic_add_return(1, &mdev->packet_seq));
26877 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26878
26879 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26880
26881 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26882 atomic_set(&mdev->unacked_cnt, 0);
26883 atomic_set(&mdev->local_cnt, 0);
26884 atomic_set(&mdev->net_cnt, 0);
26885 - atomic_set(&mdev->packet_seq, 0);
26886 + atomic_set_unchecked(&mdev->packet_seq, 0);
26887 atomic_set(&mdev->pp_in_use, 0);
26888 atomic_set(&mdev->pp_in_use_by_net, 0);
26889 atomic_set(&mdev->rs_sect_in, 0);
26890 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26891 mdev->receiver.t_state);
26892
26893 /* no need to lock it, I'm the only thread alive */
26894 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26895 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26896 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26897 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26898 mdev->al_writ_cnt =
26899 mdev->bm_writ_cnt =
26900 mdev->read_cnt =
26901 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26902 index af2a250..219c74b 100644
26903 --- a/drivers/block/drbd/drbd_nl.c
26904 +++ b/drivers/block/drbd/drbd_nl.c
26905 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26906 module_put(THIS_MODULE);
26907 }
26908
26909 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26910 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26911
26912 static unsigned short *
26913 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26914 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26915 cn_reply->id.idx = CN_IDX_DRBD;
26916 cn_reply->id.val = CN_VAL_DRBD;
26917
26918 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26919 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26920 cn_reply->ack = 0; /* not used here. */
26921 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26922 (int)((char *)tl - (char *)reply->tag_list);
26923 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26924 cn_reply->id.idx = CN_IDX_DRBD;
26925 cn_reply->id.val = CN_VAL_DRBD;
26926
26927 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26928 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26929 cn_reply->ack = 0; /* not used here. */
26930 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26931 (int)((char *)tl - (char *)reply->tag_list);
26932 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26933 cn_reply->id.idx = CN_IDX_DRBD;
26934 cn_reply->id.val = CN_VAL_DRBD;
26935
26936 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26937 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26938 cn_reply->ack = 0; // not used here.
26939 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26940 (int)((char*)tl - (char*)reply->tag_list);
26941 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26942 cn_reply->id.idx = CN_IDX_DRBD;
26943 cn_reply->id.val = CN_VAL_DRBD;
26944
26945 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26946 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26947 cn_reply->ack = 0; /* not used here. */
26948 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26949 (int)((char *)tl - (char *)reply->tag_list);
26950 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26951 index 43beaca..4a5b1dd 100644
26952 --- a/drivers/block/drbd/drbd_receiver.c
26953 +++ b/drivers/block/drbd/drbd_receiver.c
26954 @@ -894,7 +894,7 @@ retry:
26955 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26956 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26957
26958 - atomic_set(&mdev->packet_seq, 0);
26959 + atomic_set_unchecked(&mdev->packet_seq, 0);
26960 mdev->peer_seq = 0;
26961
26962 drbd_thread_start(&mdev->asender);
26963 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26964 do {
26965 next_epoch = NULL;
26966
26967 - epoch_size = atomic_read(&epoch->epoch_size);
26968 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26969
26970 switch (ev & ~EV_CLEANUP) {
26971 case EV_PUT:
26972 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26973 rv = FE_DESTROYED;
26974 } else {
26975 epoch->flags = 0;
26976 - atomic_set(&epoch->epoch_size, 0);
26977 + atomic_set_unchecked(&epoch->epoch_size, 0);
26978 /* atomic_set(&epoch->active, 0); is already zero */
26979 if (rv == FE_STILL_LIVE)
26980 rv = FE_RECYCLED;
26981 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26982 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26983 drbd_flush(mdev);
26984
26985 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26986 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26987 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26988 if (epoch)
26989 break;
26990 }
26991
26992 epoch = mdev->current_epoch;
26993 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26994 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26995
26996 D_ASSERT(atomic_read(&epoch->active) == 0);
26997 D_ASSERT(epoch->flags == 0);
26998 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26999 }
27000
27001 epoch->flags = 0;
27002 - atomic_set(&epoch->epoch_size, 0);
27003 + atomic_set_unchecked(&epoch->epoch_size, 0);
27004 atomic_set(&epoch->active, 0);
27005
27006 spin_lock(&mdev->epoch_lock);
27007 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
27008 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
27009 list_add(&epoch->list, &mdev->current_epoch->list);
27010 mdev->current_epoch = epoch;
27011 mdev->epochs++;
27012 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27013 spin_unlock(&mdev->peer_seq_lock);
27014
27015 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
27016 - atomic_inc(&mdev->current_epoch->epoch_size);
27017 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
27018 return drbd_drain_block(mdev, data_size);
27019 }
27020
27021 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27022
27023 spin_lock(&mdev->epoch_lock);
27024 e->epoch = mdev->current_epoch;
27025 - atomic_inc(&e->epoch->epoch_size);
27026 + atomic_inc_unchecked(&e->epoch->epoch_size);
27027 atomic_inc(&e->epoch->active);
27028 spin_unlock(&mdev->epoch_lock);
27029
27030 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
27031 D_ASSERT(list_empty(&mdev->done_ee));
27032
27033 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
27034 - atomic_set(&mdev->current_epoch->epoch_size, 0);
27035 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
27036 D_ASSERT(list_empty(&mdev->current_epoch->list));
27037 }
27038
27039 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
27040 index 1e888c9..05cf1b0 100644
27041 --- a/drivers/block/loop.c
27042 +++ b/drivers/block/loop.c
27043 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
27044 mm_segment_t old_fs = get_fs();
27045
27046 set_fs(get_ds());
27047 - bw = file->f_op->write(file, buf, len, &pos);
27048 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
27049 set_fs(old_fs);
27050 if (likely(bw == len))
27051 return 0;
27052 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
27053 index 4364303..9adf4ee 100644
27054 --- a/drivers/char/Kconfig
27055 +++ b/drivers/char/Kconfig
27056 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
27057
27058 config DEVKMEM
27059 bool "/dev/kmem virtual device support"
27060 - default y
27061 + default n
27062 + depends on !GRKERNSEC_KMEM
27063 help
27064 Say Y here if you want to support the /dev/kmem device. The
27065 /dev/kmem device is rarely used, but can be used for certain
27066 @@ -596,6 +597,7 @@ config DEVPORT
27067 bool
27068 depends on !M68K
27069 depends on ISA || PCI
27070 + depends on !GRKERNSEC_KMEM
27071 default y
27072
27073 source "drivers/s390/char/Kconfig"
27074 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
27075 index 2e04433..22afc64 100644
27076 --- a/drivers/char/agp/frontend.c
27077 +++ b/drivers/char/agp/frontend.c
27078 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
27079 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27080 return -EFAULT;
27081
27082 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27083 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27084 return -EFAULT;
27085
27086 client = agp_find_client_by_pid(reserve.pid);
27087 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
27088 index 095ab90..afad0a4 100644
27089 --- a/drivers/char/briq_panel.c
27090 +++ b/drivers/char/briq_panel.c
27091 @@ -9,6 +9,7 @@
27092 #include <linux/types.h>
27093 #include <linux/errno.h>
27094 #include <linux/tty.h>
27095 +#include <linux/mutex.h>
27096 #include <linux/timer.h>
27097 #include <linux/kernel.h>
27098 #include <linux/wait.h>
27099 @@ -34,6 +35,7 @@ static int vfd_is_open;
27100 static unsigned char vfd[40];
27101 static int vfd_cursor;
27102 static unsigned char ledpb, led;
27103 +static DEFINE_MUTEX(vfd_mutex);
27104
27105 static void update_vfd(void)
27106 {
27107 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27108 if (!vfd_is_open)
27109 return -EBUSY;
27110
27111 + mutex_lock(&vfd_mutex);
27112 for (;;) {
27113 char c;
27114 if (!indx)
27115 break;
27116 - if (get_user(c, buf))
27117 + if (get_user(c, buf)) {
27118 + mutex_unlock(&vfd_mutex);
27119 return -EFAULT;
27120 + }
27121 if (esc) {
27122 set_led(c);
27123 esc = 0;
27124 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27125 buf++;
27126 }
27127 update_vfd();
27128 + mutex_unlock(&vfd_mutex);
27129
27130 return len;
27131 }
27132 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
27133 index f773a9d..65cd683 100644
27134 --- a/drivers/char/genrtc.c
27135 +++ b/drivers/char/genrtc.c
27136 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
27137 switch (cmd) {
27138
27139 case RTC_PLL_GET:
27140 + memset(&pll, 0, sizeof(pll));
27141 if (get_rtc_pll(&pll))
27142 return -EINVAL;
27143 else
27144 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
27145 index 0833896..cccce52 100644
27146 --- a/drivers/char/hpet.c
27147 +++ b/drivers/char/hpet.c
27148 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
27149 }
27150
27151 static int
27152 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
27153 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
27154 struct hpet_info *info)
27155 {
27156 struct hpet_timer __iomem *timer;
27157 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
27158 index 58c0e63..46c16bf 100644
27159 --- a/drivers/char/ipmi/ipmi_msghandler.c
27160 +++ b/drivers/char/ipmi/ipmi_msghandler.c
27161 @@ -415,7 +415,7 @@ struct ipmi_smi {
27162 struct proc_dir_entry *proc_dir;
27163 char proc_dir_name[10];
27164
27165 - atomic_t stats[IPMI_NUM_STATS];
27166 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27167
27168 /*
27169 * run_to_completion duplicate of smb_info, smi_info
27170 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27171
27172
27173 #define ipmi_inc_stat(intf, stat) \
27174 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27175 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27176 #define ipmi_get_stat(intf, stat) \
27177 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27178 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27179
27180 static int is_lan_addr(struct ipmi_addr *addr)
27181 {
27182 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
27183 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27184 init_waitqueue_head(&intf->waitq);
27185 for (i = 0; i < IPMI_NUM_STATS; i++)
27186 - atomic_set(&intf->stats[i], 0);
27187 + atomic_set_unchecked(&intf->stats[i], 0);
27188
27189 intf->proc_dir = NULL;
27190
27191 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27192 index 9397ab4..d01bee1 100644
27193 --- a/drivers/char/ipmi/ipmi_si_intf.c
27194 +++ b/drivers/char/ipmi/ipmi_si_intf.c
27195 @@ -277,7 +277,7 @@ struct smi_info {
27196 unsigned char slave_addr;
27197
27198 /* Counters and things for the proc filesystem. */
27199 - atomic_t stats[SI_NUM_STATS];
27200 + atomic_unchecked_t stats[SI_NUM_STATS];
27201
27202 struct task_struct *thread;
27203
27204 @@ -286,9 +286,9 @@ struct smi_info {
27205 };
27206
27207 #define smi_inc_stat(smi, stat) \
27208 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27209 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27210 #define smi_get_stat(smi, stat) \
27211 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27212 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27213
27214 #define SI_MAX_PARMS 4
27215
27216 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27217 atomic_set(&new_smi->req_events, 0);
27218 new_smi->run_to_completion = 0;
27219 for (i = 0; i < SI_NUM_STATS; i++)
27220 - atomic_set(&new_smi->stats[i], 0);
27221 + atomic_set_unchecked(&new_smi->stats[i], 0);
27222
27223 new_smi->interrupt_disabled = 1;
27224 atomic_set(&new_smi->stop_operation, 0);
27225 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27226 index 1aeaaba..e018570 100644
27227 --- a/drivers/char/mbcs.c
27228 +++ b/drivers/char/mbcs.c
27229 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27230 return 0;
27231 }
27232
27233 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27234 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27235 {
27236 .part_num = MBCS_PART_NUM,
27237 .mfg_num = MBCS_MFG_NUM,
27238 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27239 index 1451790..f705c30 100644
27240 --- a/drivers/char/mem.c
27241 +++ b/drivers/char/mem.c
27242 @@ -18,6 +18,7 @@
27243 #include <linux/raw.h>
27244 #include <linux/tty.h>
27245 #include <linux/capability.h>
27246 +#include <linux/security.h>
27247 #include <linux/ptrace.h>
27248 #include <linux/device.h>
27249 #include <linux/highmem.h>
27250 @@ -35,6 +36,10 @@
27251 # include <linux/efi.h>
27252 #endif
27253
27254 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27255 +extern const struct file_operations grsec_fops;
27256 +#endif
27257 +
27258 static inline unsigned long size_inside_page(unsigned long start,
27259 unsigned long size)
27260 {
27261 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27262
27263 while (cursor < to) {
27264 if (!devmem_is_allowed(pfn)) {
27265 +#ifdef CONFIG_GRKERNSEC_KMEM
27266 + gr_handle_mem_readwrite(from, to);
27267 +#else
27268 printk(KERN_INFO
27269 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27270 current->comm, from, to);
27271 +#endif
27272 return 0;
27273 }
27274 cursor += PAGE_SIZE;
27275 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27276 }
27277 return 1;
27278 }
27279 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27280 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27281 +{
27282 + return 0;
27283 +}
27284 #else
27285 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27286 {
27287 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27288
27289 while (count > 0) {
27290 unsigned long remaining;
27291 + char *temp;
27292
27293 sz = size_inside_page(p, count);
27294
27295 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27296 if (!ptr)
27297 return -EFAULT;
27298
27299 - remaining = copy_to_user(buf, ptr, sz);
27300 +#ifdef CONFIG_PAX_USERCOPY
27301 + temp = kmalloc(sz, GFP_KERNEL);
27302 + if (!temp) {
27303 + unxlate_dev_mem_ptr(p, ptr);
27304 + return -ENOMEM;
27305 + }
27306 + memcpy(temp, ptr, sz);
27307 +#else
27308 + temp = ptr;
27309 +#endif
27310 +
27311 + remaining = copy_to_user(buf, temp, sz);
27312 +
27313 +#ifdef CONFIG_PAX_USERCOPY
27314 + kfree(temp);
27315 +#endif
27316 +
27317 unxlate_dev_mem_ptr(p, ptr);
27318 if (remaining)
27319 return -EFAULT;
27320 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27321 size_t count, loff_t *ppos)
27322 {
27323 unsigned long p = *ppos;
27324 - ssize_t low_count, read, sz;
27325 + ssize_t low_count, read, sz, err = 0;
27326 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27327 - int err = 0;
27328
27329 read = 0;
27330 if (p < (unsigned long) high_memory) {
27331 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27332 }
27333 #endif
27334 while (low_count > 0) {
27335 + char *temp;
27336 +
27337 sz = size_inside_page(p, low_count);
27338
27339 /*
27340 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27341 */
27342 kbuf = xlate_dev_kmem_ptr((char *)p);
27343
27344 - if (copy_to_user(buf, kbuf, sz))
27345 +#ifdef CONFIG_PAX_USERCOPY
27346 + temp = kmalloc(sz, GFP_KERNEL);
27347 + if (!temp)
27348 + return -ENOMEM;
27349 + memcpy(temp, kbuf, sz);
27350 +#else
27351 + temp = kbuf;
27352 +#endif
27353 +
27354 + err = copy_to_user(buf, temp, sz);
27355 +
27356 +#ifdef CONFIG_PAX_USERCOPY
27357 + kfree(temp);
27358 +#endif
27359 +
27360 + if (err)
27361 return -EFAULT;
27362 buf += sz;
27363 p += sz;
27364 @@ -867,6 +914,9 @@ static const struct memdev {
27365 #ifdef CONFIG_CRASH_DUMP
27366 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27367 #endif
27368 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27369 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27370 +#endif
27371 };
27372
27373 static int memory_open(struct inode *inode, struct file *filp)
27374 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27375 index da3cfee..a5a6606 100644
27376 --- a/drivers/char/nvram.c
27377 +++ b/drivers/char/nvram.c
27378 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27379
27380 spin_unlock_irq(&rtc_lock);
27381
27382 - if (copy_to_user(buf, contents, tmp - contents))
27383 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27384 return -EFAULT;
27385
27386 *ppos = i;
27387 diff --git a/drivers/char/random.c b/drivers/char/random.c
27388 index 6035ab8..bdfe4fd 100644
27389 --- a/drivers/char/random.c
27390 +++ b/drivers/char/random.c
27391 @@ -261,8 +261,13 @@
27392 /*
27393 * Configuration information
27394 */
27395 +#ifdef CONFIG_GRKERNSEC_RANDNET
27396 +#define INPUT_POOL_WORDS 512
27397 +#define OUTPUT_POOL_WORDS 128
27398 +#else
27399 #define INPUT_POOL_WORDS 128
27400 #define OUTPUT_POOL_WORDS 32
27401 +#endif
27402 #define SEC_XFER_SIZE 512
27403 #define EXTRACT_SIZE 10
27404
27405 @@ -300,10 +305,17 @@ static struct poolinfo {
27406 int poolwords;
27407 int tap1, tap2, tap3, tap4, tap5;
27408 } poolinfo_table[] = {
27409 +#ifdef CONFIG_GRKERNSEC_RANDNET
27410 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27411 + { 512, 411, 308, 208, 104, 1 },
27412 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27413 + { 128, 103, 76, 51, 25, 1 },
27414 +#else
27415 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27416 { 128, 103, 76, 51, 25, 1 },
27417 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27418 { 32, 26, 20, 14, 7, 1 },
27419 +#endif
27420 #if 0
27421 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27422 { 2048, 1638, 1231, 819, 411, 1 },
27423 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27424
27425 extract_buf(r, tmp);
27426 i = min_t(int, nbytes, EXTRACT_SIZE);
27427 - if (copy_to_user(buf, tmp, i)) {
27428 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27429 ret = -EFAULT;
27430 break;
27431 }
27432 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27433 #include <linux/sysctl.h>
27434
27435 static int min_read_thresh = 8, min_write_thresh;
27436 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27437 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27438 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27439 static char sysctl_bootid[16];
27440
27441 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27442 index 1ee8ce7..b778bef 100644
27443 --- a/drivers/char/sonypi.c
27444 +++ b/drivers/char/sonypi.c
27445 @@ -55,6 +55,7 @@
27446 #include <asm/uaccess.h>
27447 #include <asm/io.h>
27448 #include <asm/system.h>
27449 +#include <asm/local.h>
27450
27451 #include <linux/sonypi.h>
27452
27453 @@ -491,7 +492,7 @@ static struct sonypi_device {
27454 spinlock_t fifo_lock;
27455 wait_queue_head_t fifo_proc_list;
27456 struct fasync_struct *fifo_async;
27457 - int open_count;
27458 + local_t open_count;
27459 int model;
27460 struct input_dev *input_jog_dev;
27461 struct input_dev *input_key_dev;
27462 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27463 static int sonypi_misc_release(struct inode *inode, struct file *file)
27464 {
27465 mutex_lock(&sonypi_device.lock);
27466 - sonypi_device.open_count--;
27467 + local_dec(&sonypi_device.open_count);
27468 mutex_unlock(&sonypi_device.lock);
27469 return 0;
27470 }
27471 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27472 {
27473 mutex_lock(&sonypi_device.lock);
27474 /* Flush input queue on first open */
27475 - if (!sonypi_device.open_count)
27476 + if (!local_read(&sonypi_device.open_count))
27477 kfifo_reset(&sonypi_device.fifo);
27478 - sonypi_device.open_count++;
27479 + local_inc(&sonypi_device.open_count);
27480 mutex_unlock(&sonypi_device.lock);
27481
27482 return 0;
27483 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27484 index 361a1df..2471eee 100644
27485 --- a/drivers/char/tpm/tpm.c
27486 +++ b/drivers/char/tpm/tpm.c
27487 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27488 chip->vendor.req_complete_val)
27489 goto out_recv;
27490
27491 - if ((status == chip->vendor.req_canceled)) {
27492 + if (status == chip->vendor.req_canceled) {
27493 dev_err(chip->dev, "Operation Canceled\n");
27494 rc = -ECANCELED;
27495 goto out;
27496 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27497 index 0636520..169c1d0 100644
27498 --- a/drivers/char/tpm/tpm_bios.c
27499 +++ b/drivers/char/tpm/tpm_bios.c
27500 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27501 event = addr;
27502
27503 if ((event->event_type == 0 && event->event_size == 0) ||
27504 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27505 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27506 return NULL;
27507
27508 return addr;
27509 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27510 return NULL;
27511
27512 if ((event->event_type == 0 && event->event_size == 0) ||
27513 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27514 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27515 return NULL;
27516
27517 (*pos)++;
27518 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27519 int i;
27520
27521 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27522 - seq_putc(m, data[i]);
27523 + if (!seq_putc(m, data[i]))
27524 + return -EFAULT;
27525
27526 return 0;
27527 }
27528 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27529 log->bios_event_log_end = log->bios_event_log + len;
27530
27531 virt = acpi_os_map_memory(start, len);
27532 + if (!virt) {
27533 + kfree(log->bios_event_log);
27534 + log->bios_event_log = NULL;
27535 + return -EFAULT;
27536 + }
27537
27538 - memcpy(log->bios_event_log, virt, len);
27539 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27540
27541 acpi_os_unmap_memory(virt, len);
27542 return 0;
27543 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27544 index 8e3c46d..c139b99 100644
27545 --- a/drivers/char/virtio_console.c
27546 +++ b/drivers/char/virtio_console.c
27547 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27548 if (to_user) {
27549 ssize_t ret;
27550
27551 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27552 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27553 if (ret)
27554 return -EFAULT;
27555 } else {
27556 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27557 if (!port_has_data(port) && !port->host_connected)
27558 return 0;
27559
27560 - return fill_readbuf(port, ubuf, count, true);
27561 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27562 }
27563
27564 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27565 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27566 index eb1d864..39ee5a7 100644
27567 --- a/drivers/dma/dmatest.c
27568 +++ b/drivers/dma/dmatest.c
27569 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27570 }
27571 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27572 cnt = dmatest_add_threads(dtc, DMA_PQ);
27573 - thread_count += cnt > 0 ?: 0;
27574 + thread_count += cnt > 0 ? cnt : 0;
27575 }
27576
27577 pr_info("dmatest: Started %u threads using %s\n",
27578 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27579 index c9eee6d..f9d5280 100644
27580 --- a/drivers/edac/amd64_edac.c
27581 +++ b/drivers/edac/amd64_edac.c
27582 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27583 * PCI core identifies what devices are on a system during boot, and then
27584 * inquiry this table to see if this driver is for a given device found.
27585 */
27586 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27587 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27588 {
27589 .vendor = PCI_VENDOR_ID_AMD,
27590 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27591 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27592 index e47e73b..348e0bd 100644
27593 --- a/drivers/edac/amd76x_edac.c
27594 +++ b/drivers/edac/amd76x_edac.c
27595 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27596 edac_mc_free(mci);
27597 }
27598
27599 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27600 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27601 {
27602 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27603 AMD762},
27604 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27605 index 1af531a..3a8ff27 100644
27606 --- a/drivers/edac/e752x_edac.c
27607 +++ b/drivers/edac/e752x_edac.c
27608 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27609 edac_mc_free(mci);
27610 }
27611
27612 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27613 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27614 {
27615 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27616 E7520},
27617 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27618 index 6ffb6d2..383d8d7 100644
27619 --- a/drivers/edac/e7xxx_edac.c
27620 +++ b/drivers/edac/e7xxx_edac.c
27621 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27622 edac_mc_free(mci);
27623 }
27624
27625 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27626 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27627 {
27628 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27629 E7205},
27630 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27631 index 495198a..ac08c85 100644
27632 --- a/drivers/edac/edac_pci_sysfs.c
27633 +++ b/drivers/edac/edac_pci_sysfs.c
27634 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27635 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27636 static int edac_pci_poll_msec = 1000; /* one second workq period */
27637
27638 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27639 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27640 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27641 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27642
27643 static struct kobject *edac_pci_top_main_kobj;
27644 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27645 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27646 edac_printk(KERN_CRIT, EDAC_PCI,
27647 "Signaled System Error on %s\n",
27648 pci_name(dev));
27649 - atomic_inc(&pci_nonparity_count);
27650 + atomic_inc_unchecked(&pci_nonparity_count);
27651 }
27652
27653 if (status & (PCI_STATUS_PARITY)) {
27654 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27655 "Master Data Parity Error on %s\n",
27656 pci_name(dev));
27657
27658 - atomic_inc(&pci_parity_count);
27659 + atomic_inc_unchecked(&pci_parity_count);
27660 }
27661
27662 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27663 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27664 "Detected Parity Error on %s\n",
27665 pci_name(dev));
27666
27667 - atomic_inc(&pci_parity_count);
27668 + atomic_inc_unchecked(&pci_parity_count);
27669 }
27670 }
27671
27672 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27673 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27674 "Signaled System Error on %s\n",
27675 pci_name(dev));
27676 - atomic_inc(&pci_nonparity_count);
27677 + atomic_inc_unchecked(&pci_nonparity_count);
27678 }
27679
27680 if (status & (PCI_STATUS_PARITY)) {
27681 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27682 "Master Data Parity Error on "
27683 "%s\n", pci_name(dev));
27684
27685 - atomic_inc(&pci_parity_count);
27686 + atomic_inc_unchecked(&pci_parity_count);
27687 }
27688
27689 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27690 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27691 "Detected Parity Error on %s\n",
27692 pci_name(dev));
27693
27694 - atomic_inc(&pci_parity_count);
27695 + atomic_inc_unchecked(&pci_parity_count);
27696 }
27697 }
27698 }
27699 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27700 if (!check_pci_errors)
27701 return;
27702
27703 - before_count = atomic_read(&pci_parity_count);
27704 + before_count = atomic_read_unchecked(&pci_parity_count);
27705
27706 /* scan all PCI devices looking for a Parity Error on devices and
27707 * bridges.
27708 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27709 /* Only if operator has selected panic on PCI Error */
27710 if (edac_pci_get_panic_on_pe()) {
27711 /* If the count is different 'after' from 'before' */
27712 - if (before_count != atomic_read(&pci_parity_count))
27713 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27714 panic("EDAC: PCI Parity Error");
27715 }
27716 }
27717 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27718 index c0510b3..6e2a954 100644
27719 --- a/drivers/edac/i3000_edac.c
27720 +++ b/drivers/edac/i3000_edac.c
27721 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27722 edac_mc_free(mci);
27723 }
27724
27725 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27726 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27727 {
27728 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27729 I3000},
27730 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27731 index aa08497..7e6822a 100644
27732 --- a/drivers/edac/i3200_edac.c
27733 +++ b/drivers/edac/i3200_edac.c
27734 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27735 edac_mc_free(mci);
27736 }
27737
27738 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27739 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27740 {
27741 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27742 I3200},
27743 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27744 index 4dc3ac2..67d05a6 100644
27745 --- a/drivers/edac/i5000_edac.c
27746 +++ b/drivers/edac/i5000_edac.c
27747 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27748 *
27749 * The "E500P" device is the first device supported.
27750 */
27751 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27752 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27753 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27754 .driver_data = I5000P},
27755
27756 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27757 index bcbdeec..9886d16 100644
27758 --- a/drivers/edac/i5100_edac.c
27759 +++ b/drivers/edac/i5100_edac.c
27760 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27761 edac_mc_free(mci);
27762 }
27763
27764 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27765 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27766 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27767 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27768 { 0, }
27769 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27770 index 74d6ec34..baff517 100644
27771 --- a/drivers/edac/i5400_edac.c
27772 +++ b/drivers/edac/i5400_edac.c
27773 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27774 *
27775 * The "E500P" device is the first device supported.
27776 */
27777 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27778 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27779 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27780 {0,} /* 0 terminated list. */
27781 };
27782 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27783 index 6104dba..e7ea8e1 100644
27784 --- a/drivers/edac/i7300_edac.c
27785 +++ b/drivers/edac/i7300_edac.c
27786 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27787 *
27788 * Has only 8086:360c PCI ID
27789 */
27790 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27791 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27792 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27793 {0,} /* 0 terminated list. */
27794 };
27795 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27796 index 70ad892..178943c 100644
27797 --- a/drivers/edac/i7core_edac.c
27798 +++ b/drivers/edac/i7core_edac.c
27799 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27800 /*
27801 * pci_device_id table for which devices we are looking for
27802 */
27803 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27804 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27805 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27806 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27807 {0,} /* 0 terminated list. */
27808 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27809 index 4329d39..f3022ef 100644
27810 --- a/drivers/edac/i82443bxgx_edac.c
27811 +++ b/drivers/edac/i82443bxgx_edac.c
27812 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27813
27814 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27815
27816 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27817 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27818 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27819 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27820 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27821 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27822 index 931a057..fd28340 100644
27823 --- a/drivers/edac/i82860_edac.c
27824 +++ b/drivers/edac/i82860_edac.c
27825 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27826 edac_mc_free(mci);
27827 }
27828
27829 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27830 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27831 {
27832 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27833 I82860},
27834 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27835 index 33864c6..01edc61 100644
27836 --- a/drivers/edac/i82875p_edac.c
27837 +++ b/drivers/edac/i82875p_edac.c
27838 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27839 edac_mc_free(mci);
27840 }
27841
27842 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27843 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27844 {
27845 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27846 I82875P},
27847 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27848 index a5da732..983363b 100644
27849 --- a/drivers/edac/i82975x_edac.c
27850 +++ b/drivers/edac/i82975x_edac.c
27851 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27852 edac_mc_free(mci);
27853 }
27854
27855 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27856 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27857 {
27858 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27859 I82975X
27860 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27861 index 0106747..0b40417 100644
27862 --- a/drivers/edac/mce_amd.h
27863 +++ b/drivers/edac/mce_amd.h
27864 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27865 bool (*dc_mce)(u16, u8);
27866 bool (*ic_mce)(u16, u8);
27867 bool (*nb_mce)(u16, u8);
27868 -};
27869 +} __no_const;
27870
27871 void amd_report_gart_errors(bool);
27872 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27873 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27874 index b153674..ad2ba9b 100644
27875 --- a/drivers/edac/r82600_edac.c
27876 +++ b/drivers/edac/r82600_edac.c
27877 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27878 edac_mc_free(mci);
27879 }
27880
27881 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27882 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27883 {
27884 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27885 },
27886 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27887 index 7a402bf..af0b211 100644
27888 --- a/drivers/edac/sb_edac.c
27889 +++ b/drivers/edac/sb_edac.c
27890 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27891 /*
27892 * pci_device_id table for which devices we are looking for
27893 */
27894 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27895 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27896 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27897 {0,} /* 0 terminated list. */
27898 };
27899 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27900 index b6f47de..c5acf3a 100644
27901 --- a/drivers/edac/x38_edac.c
27902 +++ b/drivers/edac/x38_edac.c
27903 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27904 edac_mc_free(mci);
27905 }
27906
27907 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27908 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27909 {
27910 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27911 X38},
27912 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27913 index 85661b0..c784559a 100644
27914 --- a/drivers/firewire/core-card.c
27915 +++ b/drivers/firewire/core-card.c
27916 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27917
27918 void fw_core_remove_card(struct fw_card *card)
27919 {
27920 - struct fw_card_driver dummy_driver = dummy_driver_template;
27921 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27922
27923 card->driver->update_phy_reg(card, 4,
27924 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27925 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27926 index 4799393..37bd3ab 100644
27927 --- a/drivers/firewire/core-cdev.c
27928 +++ b/drivers/firewire/core-cdev.c
27929 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27930 int ret;
27931
27932 if ((request->channels == 0 && request->bandwidth == 0) ||
27933 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27934 - request->bandwidth < 0)
27935 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27936 return -EINVAL;
27937
27938 r = kmalloc(sizeof(*r), GFP_KERNEL);
27939 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27940 index 855ab3f..11f4bbd 100644
27941 --- a/drivers/firewire/core-transaction.c
27942 +++ b/drivers/firewire/core-transaction.c
27943 @@ -37,6 +37,7 @@
27944 #include <linux/timer.h>
27945 #include <linux/types.h>
27946 #include <linux/workqueue.h>
27947 +#include <linux/sched.h>
27948
27949 #include <asm/byteorder.h>
27950
27951 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27952 index b45be57..5fad18b 100644
27953 --- a/drivers/firewire/core.h
27954 +++ b/drivers/firewire/core.h
27955 @@ -101,6 +101,7 @@ struct fw_card_driver {
27956
27957 int (*stop_iso)(struct fw_iso_context *ctx);
27958 };
27959 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27960
27961 void fw_card_initialize(struct fw_card *card,
27962 const struct fw_card_driver *driver, struct device *device);
27963 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27964 index 153980b..4b4d046 100644
27965 --- a/drivers/firmware/dmi_scan.c
27966 +++ b/drivers/firmware/dmi_scan.c
27967 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27968 }
27969 }
27970 else {
27971 - /*
27972 - * no iounmap() for that ioremap(); it would be a no-op, but
27973 - * it's so early in setup that sucker gets confused into doing
27974 - * what it shouldn't if we actually call it.
27975 - */
27976 p = dmi_ioremap(0xF0000, 0x10000);
27977 if (p == NULL)
27978 goto error;
27979 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27980 if (buf == NULL)
27981 return -1;
27982
27983 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27984 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27985
27986 iounmap(buf);
27987 return 0;
27988 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27989 index 98723cb..10ca85b 100644
27990 --- a/drivers/gpio/gpio-vr41xx.c
27991 +++ b/drivers/gpio/gpio-vr41xx.c
27992 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27993 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27994 maskl, pendl, maskh, pendh);
27995
27996 - atomic_inc(&irq_err_count);
27997 + atomic_inc_unchecked(&irq_err_count);
27998
27999 return -EINVAL;
28000 }
28001 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
28002 index 8323fc3..5c1d755 100644
28003 --- a/drivers/gpu/drm/drm_crtc.c
28004 +++ b/drivers/gpu/drm/drm_crtc.c
28005 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28006 */
28007 if ((out_resp->count_modes >= mode_count) && mode_count) {
28008 copied = 0;
28009 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
28010 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
28011 list_for_each_entry(mode, &connector->modes, head) {
28012 drm_crtc_convert_to_umode(&u_mode, mode);
28013 if (copy_to_user(mode_ptr + copied,
28014 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28015
28016 if ((out_resp->count_props >= props_count) && props_count) {
28017 copied = 0;
28018 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
28019 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
28020 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
28021 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
28022 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
28023 if (connector->property_ids[i] != 0) {
28024 if (put_user(connector->property_ids[i],
28025 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28026
28027 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
28028 copied = 0;
28029 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
28030 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
28031 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
28032 if (connector->encoder_ids[i] != 0) {
28033 if (put_user(connector->encoder_ids[i],
28034 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
28035 }
28036
28037 for (i = 0; i < crtc_req->count_connectors; i++) {
28038 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
28039 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
28040 if (get_user(out_id, &set_connectors_ptr[i])) {
28041 ret = -EFAULT;
28042 goto out;
28043 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
28044 fb = obj_to_fb(obj);
28045
28046 num_clips = r->num_clips;
28047 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
28048 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
28049
28050 if (!num_clips != !clips_ptr) {
28051 ret = -EINVAL;
28052 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28053 out_resp->flags = property->flags;
28054
28055 if ((out_resp->count_values >= value_count) && value_count) {
28056 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
28057 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
28058 for (i = 0; i < value_count; i++) {
28059 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
28060 ret = -EFAULT;
28061 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28062 if (property->flags & DRM_MODE_PROP_ENUM) {
28063 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
28064 copied = 0;
28065 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
28066 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
28067 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
28068
28069 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
28070 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28071 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
28072 copied = 0;
28073 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
28074 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
28075 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
28076
28077 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
28078 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
28079 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28080 struct drm_mode_get_blob *out_resp = data;
28081 struct drm_property_blob *blob;
28082 int ret = 0;
28083 - void *blob_ptr;
28084 + void __user *blob_ptr;
28085
28086 if (!drm_core_check_feature(dev, DRIVER_MODESET))
28087 return -EINVAL;
28088 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28089 blob = obj_to_blob(obj);
28090
28091 if (out_resp->length == blob->length) {
28092 - blob_ptr = (void *)(unsigned long)out_resp->data;
28093 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
28094 if (copy_to_user(blob_ptr, blob->data, blob->length)){
28095 ret = -EFAULT;
28096 goto done;
28097 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
28098 index d2619d7..bd6bd00 100644
28099 --- a/drivers/gpu/drm/drm_crtc_helper.c
28100 +++ b/drivers/gpu/drm/drm_crtc_helper.c
28101 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
28102 struct drm_crtc *tmp;
28103 int crtc_mask = 1;
28104
28105 - WARN(!crtc, "checking null crtc?\n");
28106 + BUG_ON(!crtc);
28107
28108 dev = crtc->dev;
28109
28110 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
28111 index 40c187c..5746164 100644
28112 --- a/drivers/gpu/drm/drm_drv.c
28113 +++ b/drivers/gpu/drm/drm_drv.c
28114 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
28115 /**
28116 * Copy and IOCTL return string to user space
28117 */
28118 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
28119 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
28120 {
28121 int len;
28122
28123 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
28124
28125 dev = file_priv->minor->dev;
28126 atomic_inc(&dev->ioctl_count);
28127 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28128 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28129 ++file_priv->ioctl_count;
28130
28131 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28132 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
28133 index 828bf65..cdaa0e9 100644
28134 --- a/drivers/gpu/drm/drm_fops.c
28135 +++ b/drivers/gpu/drm/drm_fops.c
28136 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
28137 }
28138
28139 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28140 - atomic_set(&dev->counts[i], 0);
28141 + atomic_set_unchecked(&dev->counts[i], 0);
28142
28143 dev->sigdata.lock = NULL;
28144
28145 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
28146
28147 retcode = drm_open_helper(inode, filp, dev);
28148 if (!retcode) {
28149 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28150 - if (!dev->open_count++)
28151 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28152 + if (local_inc_return(&dev->open_count) == 1)
28153 retcode = drm_setup(dev);
28154 }
28155 if (!retcode) {
28156 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
28157
28158 mutex_lock(&drm_global_mutex);
28159
28160 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28161 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28162
28163 if (dev->driver->preclose)
28164 dev->driver->preclose(dev, file_priv);
28165 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
28166 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28167 task_pid_nr(current),
28168 (long)old_encode_dev(file_priv->minor->device),
28169 - dev->open_count);
28170 + local_read(&dev->open_count));
28171
28172 /* Release any auth tokens that might point to this file_priv,
28173 (do that under the drm_global_mutex) */
28174 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
28175 * End inline drm_release
28176 */
28177
28178 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28179 - if (!--dev->open_count) {
28180 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28181 + if (local_dec_and_test(&dev->open_count)) {
28182 if (atomic_read(&dev->ioctl_count)) {
28183 DRM_ERROR("Device busy: %d\n",
28184 atomic_read(&dev->ioctl_count));
28185 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
28186 index c87dc96..326055d 100644
28187 --- a/drivers/gpu/drm/drm_global.c
28188 +++ b/drivers/gpu/drm/drm_global.c
28189 @@ -36,7 +36,7 @@
28190 struct drm_global_item {
28191 struct mutex mutex;
28192 void *object;
28193 - int refcount;
28194 + atomic_t refcount;
28195 };
28196
28197 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28198 @@ -49,7 +49,7 @@ void drm_global_init(void)
28199 struct drm_global_item *item = &glob[i];
28200 mutex_init(&item->mutex);
28201 item->object = NULL;
28202 - item->refcount = 0;
28203 + atomic_set(&item->refcount, 0);
28204 }
28205 }
28206
28207 @@ -59,7 +59,7 @@ void drm_global_release(void)
28208 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28209 struct drm_global_item *item = &glob[i];
28210 BUG_ON(item->object != NULL);
28211 - BUG_ON(item->refcount != 0);
28212 + BUG_ON(atomic_read(&item->refcount) != 0);
28213 }
28214 }
28215
28216 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28217 void *object;
28218
28219 mutex_lock(&item->mutex);
28220 - if (item->refcount == 0) {
28221 + if (atomic_read(&item->refcount) == 0) {
28222 item->object = kzalloc(ref->size, GFP_KERNEL);
28223 if (unlikely(item->object == NULL)) {
28224 ret = -ENOMEM;
28225 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28226 goto out_err;
28227
28228 }
28229 - ++item->refcount;
28230 + atomic_inc(&item->refcount);
28231 ref->object = item->object;
28232 object = item->object;
28233 mutex_unlock(&item->mutex);
28234 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28235 struct drm_global_item *item = &glob[ref->global_type];
28236
28237 mutex_lock(&item->mutex);
28238 - BUG_ON(item->refcount == 0);
28239 + BUG_ON(atomic_read(&item->refcount) == 0);
28240 BUG_ON(ref->object != item->object);
28241 - if (--item->refcount == 0) {
28242 + if (atomic_dec_and_test(&item->refcount)) {
28243 ref->release(ref);
28244 item->object = NULL;
28245 }
28246 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28247 index ab1162d..42587b2 100644
28248 --- a/drivers/gpu/drm/drm_info.c
28249 +++ b/drivers/gpu/drm/drm_info.c
28250 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28251 struct drm_local_map *map;
28252 struct drm_map_list *r_list;
28253
28254 - /* Hardcoded from _DRM_FRAME_BUFFER,
28255 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28256 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28257 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28258 + static const char * const types[] = {
28259 + [_DRM_FRAME_BUFFER] = "FB",
28260 + [_DRM_REGISTERS] = "REG",
28261 + [_DRM_SHM] = "SHM",
28262 + [_DRM_AGP] = "AGP",
28263 + [_DRM_SCATTER_GATHER] = "SG",
28264 + [_DRM_CONSISTENT] = "PCI",
28265 + [_DRM_GEM] = "GEM" };
28266 const char *type;
28267 int i;
28268
28269 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28270 map = r_list->map;
28271 if (!map)
28272 continue;
28273 - if (map->type < 0 || map->type > 5)
28274 + if (map->type >= ARRAY_SIZE(types))
28275 type = "??";
28276 else
28277 type = types[map->type];
28278 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28279 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28280 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28281 vma->vm_flags & VM_IO ? 'i' : '-',
28282 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28283 + 0);
28284 +#else
28285 vma->vm_pgoff);
28286 +#endif
28287
28288 #if defined(__i386__)
28289 pgprot = pgprot_val(vma->vm_page_prot);
28290 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28291 index ddd70db..40321e6 100644
28292 --- a/drivers/gpu/drm/drm_ioc32.c
28293 +++ b/drivers/gpu/drm/drm_ioc32.c
28294 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28295 request = compat_alloc_user_space(nbytes);
28296 if (!access_ok(VERIFY_WRITE, request, nbytes))
28297 return -EFAULT;
28298 - list = (struct drm_buf_desc *) (request + 1);
28299 + list = (struct drm_buf_desc __user *) (request + 1);
28300
28301 if (__put_user(count, &request->count)
28302 || __put_user(list, &request->list))
28303 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28304 request = compat_alloc_user_space(nbytes);
28305 if (!access_ok(VERIFY_WRITE, request, nbytes))
28306 return -EFAULT;
28307 - list = (struct drm_buf_pub *) (request + 1);
28308 + list = (struct drm_buf_pub __user *) (request + 1);
28309
28310 if (__put_user(count, &request->count)
28311 || __put_user(list, &request->list))
28312 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28313 index 904d7e9..ab88581 100644
28314 --- a/drivers/gpu/drm/drm_ioctl.c
28315 +++ b/drivers/gpu/drm/drm_ioctl.c
28316 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28317 stats->data[i].value =
28318 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28319 else
28320 - stats->data[i].value = atomic_read(&dev->counts[i]);
28321 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28322 stats->data[i].type = dev->types[i];
28323 }
28324
28325 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28326 index 632ae24..244cf4a 100644
28327 --- a/drivers/gpu/drm/drm_lock.c
28328 +++ b/drivers/gpu/drm/drm_lock.c
28329 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28330 if (drm_lock_take(&master->lock, lock->context)) {
28331 master->lock.file_priv = file_priv;
28332 master->lock.lock_time = jiffies;
28333 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28334 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28335 break; /* Got lock */
28336 }
28337
28338 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28339 return -EINVAL;
28340 }
28341
28342 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28343 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28344
28345 if (drm_lock_free(&master->lock, lock->context)) {
28346 /* FIXME: Should really bail out here. */
28347 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28348 index 8f371e8..9f85d52 100644
28349 --- a/drivers/gpu/drm/i810/i810_dma.c
28350 +++ b/drivers/gpu/drm/i810/i810_dma.c
28351 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28352 dma->buflist[vertex->idx],
28353 vertex->discard, vertex->used);
28354
28355 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28356 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28357 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28358 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28359 sarea_priv->last_enqueue = dev_priv->counter - 1;
28360 sarea_priv->last_dispatch = (int)hw_status[5];
28361
28362 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28363 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28364 mc->last_render);
28365
28366 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28367 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28368 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28369 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28370 sarea_priv->last_enqueue = dev_priv->counter - 1;
28371 sarea_priv->last_dispatch = (int)hw_status[5];
28372
28373 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28374 index c9339f4..f5e1b9d 100644
28375 --- a/drivers/gpu/drm/i810/i810_drv.h
28376 +++ b/drivers/gpu/drm/i810/i810_drv.h
28377 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28378 int page_flipping;
28379
28380 wait_queue_head_t irq_queue;
28381 - atomic_t irq_received;
28382 - atomic_t irq_emitted;
28383 + atomic_unchecked_t irq_received;
28384 + atomic_unchecked_t irq_emitted;
28385
28386 int front_offset;
28387 } drm_i810_private_t;
28388 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28389 index b2e3c97..58cf079 100644
28390 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28391 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28392 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28393 I915_READ(GTIMR));
28394 }
28395 seq_printf(m, "Interrupts received: %d\n",
28396 - atomic_read(&dev_priv->irq_received));
28397 + atomic_read_unchecked(&dev_priv->irq_received));
28398 for (i = 0; i < I915_NUM_RINGS; i++) {
28399 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28400 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28401 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28402 return ret;
28403
28404 if (opregion->header)
28405 - seq_write(m, opregion->header, OPREGION_SIZE);
28406 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28407
28408 mutex_unlock(&dev->struct_mutex);
28409
28410 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28411 index c4da951..3c59c5c 100644
28412 --- a/drivers/gpu/drm/i915/i915_dma.c
28413 +++ b/drivers/gpu/drm/i915/i915_dma.c
28414 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28415 bool can_switch;
28416
28417 spin_lock(&dev->count_lock);
28418 - can_switch = (dev->open_count == 0);
28419 + can_switch = (local_read(&dev->open_count) == 0);
28420 spin_unlock(&dev->count_lock);
28421 return can_switch;
28422 }
28423 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28424 index ae294a0..1755461 100644
28425 --- a/drivers/gpu/drm/i915/i915_drv.h
28426 +++ b/drivers/gpu/drm/i915/i915_drv.h
28427 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28428 /* render clock increase/decrease */
28429 /* display clock increase/decrease */
28430 /* pll clock increase/decrease */
28431 -};
28432 +} __no_const;
28433
28434 struct intel_device_info {
28435 u8 gen;
28436 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28437 int current_page;
28438 int page_flipping;
28439
28440 - atomic_t irq_received;
28441 + atomic_unchecked_t irq_received;
28442
28443 /* protects the irq masks */
28444 spinlock_t irq_lock;
28445 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28446 * will be page flipped away on the next vblank. When it
28447 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28448 */
28449 - atomic_t pending_flip;
28450 + atomic_unchecked_t pending_flip;
28451 };
28452
28453 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28454 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28455 extern void intel_teardown_gmbus(struct drm_device *dev);
28456 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28457 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28458 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28459 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28460 {
28461 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28462 }
28463 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28464 index b9da890..cad1d98 100644
28465 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28466 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28467 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28468 i915_gem_clflush_object(obj);
28469
28470 if (obj->base.pending_write_domain)
28471 - cd->flips |= atomic_read(&obj->pending_flip);
28472 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28473
28474 /* The actual obj->write_domain will be updated with
28475 * pending_write_domain after we emit the accumulated flush for all
28476 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28477
28478 static int
28479 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28480 - int count)
28481 + unsigned int count)
28482 {
28483 - int i;
28484 + unsigned int i;
28485
28486 for (i = 0; i < count; i++) {
28487 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28488 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28489 index d47a53b..61154c2 100644
28490 --- a/drivers/gpu/drm/i915/i915_irq.c
28491 +++ b/drivers/gpu/drm/i915/i915_irq.c
28492 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28493 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28494 struct drm_i915_master_private *master_priv;
28495
28496 - atomic_inc(&dev_priv->irq_received);
28497 + atomic_inc_unchecked(&dev_priv->irq_received);
28498
28499 /* disable master interrupt before clearing iir */
28500 de_ier = I915_READ(DEIER);
28501 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28502 struct drm_i915_master_private *master_priv;
28503 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28504
28505 - atomic_inc(&dev_priv->irq_received);
28506 + atomic_inc_unchecked(&dev_priv->irq_received);
28507
28508 if (IS_GEN6(dev))
28509 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28510 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28511 int ret = IRQ_NONE, pipe;
28512 bool blc_event = false;
28513
28514 - atomic_inc(&dev_priv->irq_received);
28515 + atomic_inc_unchecked(&dev_priv->irq_received);
28516
28517 iir = I915_READ(IIR);
28518
28519 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28520 {
28521 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28522
28523 - atomic_set(&dev_priv->irq_received, 0);
28524 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28525
28526 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28527 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28528 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28529 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28530 int pipe;
28531
28532 - atomic_set(&dev_priv->irq_received, 0);
28533 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28534
28535 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28536 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28537 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28538 index daa5743..c0757a9 100644
28539 --- a/drivers/gpu/drm/i915/intel_display.c
28540 +++ b/drivers/gpu/drm/i915/intel_display.c
28541 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28542
28543 wait_event(dev_priv->pending_flip_queue,
28544 atomic_read(&dev_priv->mm.wedged) ||
28545 - atomic_read(&obj->pending_flip) == 0);
28546 + atomic_read_unchecked(&obj->pending_flip) == 0);
28547
28548 /* Big Hammer, we also need to ensure that any pending
28549 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28550 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28551 obj = to_intel_framebuffer(crtc->fb)->obj;
28552 dev_priv = crtc->dev->dev_private;
28553 wait_event(dev_priv->pending_flip_queue,
28554 - atomic_read(&obj->pending_flip) == 0);
28555 + atomic_read_unchecked(&obj->pending_flip) == 0);
28556 }
28557
28558 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28559 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28560
28561 atomic_clear_mask(1 << intel_crtc->plane,
28562 &obj->pending_flip.counter);
28563 - if (atomic_read(&obj->pending_flip) == 0)
28564 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28565 wake_up(&dev_priv->pending_flip_queue);
28566
28567 schedule_work(&work->work);
28568 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28569 /* Block clients from rendering to the new back buffer until
28570 * the flip occurs and the object is no longer visible.
28571 */
28572 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28573 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28574
28575 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28576 if (ret)
28577 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28578 return 0;
28579
28580 cleanup_pending:
28581 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28582 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28583 drm_gem_object_unreference(&work->old_fb_obj->base);
28584 drm_gem_object_unreference(&obj->base);
28585 mutex_unlock(&dev->struct_mutex);
28586 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28587 index 54558a0..2d97005 100644
28588 --- a/drivers/gpu/drm/mga/mga_drv.h
28589 +++ b/drivers/gpu/drm/mga/mga_drv.h
28590 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28591 u32 clear_cmd;
28592 u32 maccess;
28593
28594 - atomic_t vbl_received; /**< Number of vblanks received. */
28595 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28596 wait_queue_head_t fence_queue;
28597 - atomic_t last_fence_retired;
28598 + atomic_unchecked_t last_fence_retired;
28599 u32 next_fence_to_post;
28600
28601 unsigned int fb_cpp;
28602 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28603 index 2581202..f230a8d9 100644
28604 --- a/drivers/gpu/drm/mga/mga_irq.c
28605 +++ b/drivers/gpu/drm/mga/mga_irq.c
28606 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28607 if (crtc != 0)
28608 return 0;
28609
28610 - return atomic_read(&dev_priv->vbl_received);
28611 + return atomic_read_unchecked(&dev_priv->vbl_received);
28612 }
28613
28614
28615 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28616 /* VBLANK interrupt */
28617 if (status & MGA_VLINEPEN) {
28618 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28619 - atomic_inc(&dev_priv->vbl_received);
28620 + atomic_inc_unchecked(&dev_priv->vbl_received);
28621 drm_handle_vblank(dev, 0);
28622 handled = 1;
28623 }
28624 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28625 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28626 MGA_WRITE(MGA_PRIMEND, prim_end);
28627
28628 - atomic_inc(&dev_priv->last_fence_retired);
28629 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28630 DRM_WAKEUP(&dev_priv->fence_queue);
28631 handled = 1;
28632 }
28633 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28634 * using fences.
28635 */
28636 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28637 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28638 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28639 - *sequence) <= (1 << 23)));
28640
28641 *sequence = cur_fence;
28642 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28643 index 5fc201b..7b032b9 100644
28644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28645 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28646 @@ -201,7 +201,7 @@ struct methods {
28647 const char desc[8];
28648 void (*loadbios)(struct drm_device *, uint8_t *);
28649 const bool rw;
28650 -};
28651 +} __do_const;
28652
28653 static struct methods shadow_methods[] = {
28654 { "PRAMIN", load_vbios_pramin, true },
28655 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28656 struct bit_table {
28657 const char id;
28658 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28659 -};
28660 +} __no_const;
28661
28662 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28663
28664 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28665 index 4c0be3a..5757582 100644
28666 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28667 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28668 @@ -238,7 +238,7 @@ struct nouveau_channel {
28669 struct list_head pending;
28670 uint32_t sequence;
28671 uint32_t sequence_ack;
28672 - atomic_t last_sequence_irq;
28673 + atomic_unchecked_t last_sequence_irq;
28674 struct nouveau_vma vma;
28675 } fence;
28676
28677 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28678 u32 handle, u16 class);
28679 void (*set_tile_region)(struct drm_device *dev, int i);
28680 void (*tlb_flush)(struct drm_device *, int engine);
28681 -};
28682 +} __no_const;
28683
28684 struct nouveau_instmem_engine {
28685 void *priv;
28686 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28687 struct nouveau_mc_engine {
28688 int (*init)(struct drm_device *dev);
28689 void (*takedown)(struct drm_device *dev);
28690 -};
28691 +} __no_const;
28692
28693 struct nouveau_timer_engine {
28694 int (*init)(struct drm_device *dev);
28695 void (*takedown)(struct drm_device *dev);
28696 uint64_t (*read)(struct drm_device *dev);
28697 -};
28698 +} __no_const;
28699
28700 struct nouveau_fb_engine {
28701 int num_tiles;
28702 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28703 void (*put)(struct drm_device *, struct nouveau_mem **);
28704
28705 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28706 -};
28707 +} __no_const;
28708
28709 struct nouveau_engine {
28710 struct nouveau_instmem_engine instmem;
28711 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28712 struct drm_global_reference mem_global_ref;
28713 struct ttm_bo_global_ref bo_global_ref;
28714 struct ttm_bo_device bdev;
28715 - atomic_t validate_sequence;
28716 + atomic_unchecked_t validate_sequence;
28717 } ttm;
28718
28719 struct {
28720 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28721 index 2f6daae..c9d7b9e 100644
28722 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28723 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28724 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28725 if (USE_REFCNT(dev))
28726 sequence = nvchan_rd32(chan, 0x48);
28727 else
28728 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28729 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28730
28731 if (chan->fence.sequence_ack == sequence)
28732 goto out;
28733 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28734 return ret;
28735 }
28736
28737 - atomic_set(&chan->fence.last_sequence_irq, 0);
28738 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28739 return 0;
28740 }
28741
28742 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28743 index 7ce3fde..cb3ea04 100644
28744 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28745 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28746 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28747 int trycnt = 0;
28748 int ret, i;
28749
28750 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28751 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28752 retry:
28753 if (++trycnt > 100000) {
28754 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28755 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28756 index d8831ab..0ba8356 100644
28757 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28758 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28759 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28760 bool can_switch;
28761
28762 spin_lock(&dev->count_lock);
28763 - can_switch = (dev->open_count == 0);
28764 + can_switch = (local_read(&dev->open_count) == 0);
28765 spin_unlock(&dev->count_lock);
28766 return can_switch;
28767 }
28768 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28769 index dbdea8e..cd6eeeb 100644
28770 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28771 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28772 @@ -554,7 +554,7 @@ static int
28773 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28774 u32 class, u32 mthd, u32 data)
28775 {
28776 - atomic_set(&chan->fence.last_sequence_irq, data);
28777 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28778 return 0;
28779 }
28780
28781 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28782 index bcac90b..53bfc76 100644
28783 --- a/drivers/gpu/drm/r128/r128_cce.c
28784 +++ b/drivers/gpu/drm/r128/r128_cce.c
28785 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28786
28787 /* GH: Simple idle check.
28788 */
28789 - atomic_set(&dev_priv->idle_count, 0);
28790 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28791
28792 /* We don't support anything other than bus-mastering ring mode,
28793 * but the ring can be in either AGP or PCI space for the ring
28794 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28795 index 930c71b..499aded 100644
28796 --- a/drivers/gpu/drm/r128/r128_drv.h
28797 +++ b/drivers/gpu/drm/r128/r128_drv.h
28798 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28799 int is_pci;
28800 unsigned long cce_buffers_offset;
28801
28802 - atomic_t idle_count;
28803 + atomic_unchecked_t idle_count;
28804
28805 int page_flipping;
28806 int current_page;
28807 u32 crtc_offset;
28808 u32 crtc_offset_cntl;
28809
28810 - atomic_t vbl_received;
28811 + atomic_unchecked_t vbl_received;
28812
28813 u32 color_fmt;
28814 unsigned int front_offset;
28815 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28816 index 429d5a0..7e899ed 100644
28817 --- a/drivers/gpu/drm/r128/r128_irq.c
28818 +++ b/drivers/gpu/drm/r128/r128_irq.c
28819 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28820 if (crtc != 0)
28821 return 0;
28822
28823 - return atomic_read(&dev_priv->vbl_received);
28824 + return atomic_read_unchecked(&dev_priv->vbl_received);
28825 }
28826
28827 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28828 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28829 /* VBLANK interrupt */
28830 if (status & R128_CRTC_VBLANK_INT) {
28831 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28832 - atomic_inc(&dev_priv->vbl_received);
28833 + atomic_inc_unchecked(&dev_priv->vbl_received);
28834 drm_handle_vblank(dev, 0);
28835 return IRQ_HANDLED;
28836 }
28837 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28838 index a9e33ce..09edd4b 100644
28839 --- a/drivers/gpu/drm/r128/r128_state.c
28840 +++ b/drivers/gpu/drm/r128/r128_state.c
28841 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28842
28843 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28844 {
28845 - if (atomic_read(&dev_priv->idle_count) == 0)
28846 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28847 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28848 else
28849 - atomic_set(&dev_priv->idle_count, 0);
28850 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28851 }
28852
28853 #endif
28854 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28855 index 5a82b6b..9e69c73 100644
28856 --- a/drivers/gpu/drm/radeon/mkregtable.c
28857 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28858 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28859 regex_t mask_rex;
28860 regmatch_t match[4];
28861 char buf[1024];
28862 - size_t end;
28863 + long end;
28864 int len;
28865 int done = 0;
28866 int r;
28867 unsigned o;
28868 struct offset *offset;
28869 char last_reg_s[10];
28870 - int last_reg;
28871 + unsigned long last_reg;
28872
28873 if (regcomp
28874 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28875 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28876 index 8227e76..ce0b195 100644
28877 --- a/drivers/gpu/drm/radeon/radeon.h
28878 +++ b/drivers/gpu/drm/radeon/radeon.h
28879 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28880 */
28881 struct radeon_fence_driver {
28882 uint32_t scratch_reg;
28883 - atomic_t seq;
28884 + atomic_unchecked_t seq;
28885 uint32_t last_seq;
28886 unsigned long last_jiffies;
28887 unsigned long last_timeout;
28888 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28889 int x2, int y2);
28890 void (*draw_auto)(struct radeon_device *rdev);
28891 void (*set_default_state)(struct radeon_device *rdev);
28892 -};
28893 +} __no_const;
28894
28895 struct r600_blit {
28896 struct mutex mutex;
28897 @@ -954,7 +954,7 @@ struct radeon_asic {
28898 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28899 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28900 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28901 -};
28902 +} __no_const;
28903
28904 /*
28905 * Asic structures
28906 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28907 index 9231564..78b00fd 100644
28908 --- a/drivers/gpu/drm/radeon/radeon_device.c
28909 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28910 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28911 bool can_switch;
28912
28913 spin_lock(&dev->count_lock);
28914 - can_switch = (dev->open_count == 0);
28915 + can_switch = (local_read(&dev->open_count) == 0);
28916 spin_unlock(&dev->count_lock);
28917 return can_switch;
28918 }
28919 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28920 index a1b59ca..86f2d44 100644
28921 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28922 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28923 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28924
28925 /* SW interrupt */
28926 wait_queue_head_t swi_queue;
28927 - atomic_t swi_emitted;
28928 + atomic_unchecked_t swi_emitted;
28929 int vblank_crtc;
28930 uint32_t irq_enable_reg;
28931 uint32_t r500_disp_irq_reg;
28932 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28933 index 76ec0e9..6feb1a3 100644
28934 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28935 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28936 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28937 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28938 return 0;
28939 }
28940 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28941 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28942 if (!rdev->cp.ready)
28943 /* FIXME: cp is not running assume everythings is done right
28944 * away
28945 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28946 return r;
28947 }
28948 radeon_fence_write(rdev, 0);
28949 - atomic_set(&rdev->fence_drv.seq, 0);
28950 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28951 INIT_LIST_HEAD(&rdev->fence_drv.created);
28952 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28953 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28954 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28955 index 48b7cea..342236f 100644
28956 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28957 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28958 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28959 request = compat_alloc_user_space(sizeof(*request));
28960 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28961 || __put_user(req32.param, &request->param)
28962 - || __put_user((void __user *)(unsigned long)req32.value,
28963 + || __put_user((unsigned long)req32.value,
28964 &request->value))
28965 return -EFAULT;
28966
28967 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28968 index 00da384..32f972d 100644
28969 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28970 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28971 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28972 unsigned int ret;
28973 RING_LOCALS;
28974
28975 - atomic_inc(&dev_priv->swi_emitted);
28976 - ret = atomic_read(&dev_priv->swi_emitted);
28977 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28978 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28979
28980 BEGIN_RING(4);
28981 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28982 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28983 drm_radeon_private_t *dev_priv =
28984 (drm_radeon_private_t *) dev->dev_private;
28985
28986 - atomic_set(&dev_priv->swi_emitted, 0);
28987 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28988 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28989
28990 dev->max_vblank_count = 0x001fffff;
28991 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28992 index e8422ae..d22d4a8 100644
28993 --- a/drivers/gpu/drm/radeon/radeon_state.c
28994 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28995 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28996 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28997 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28998
28999 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
29000 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
29001 sarea_priv->nbox * sizeof(depth_boxes[0])))
29002 return -EFAULT;
29003
29004 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
29005 {
29006 drm_radeon_private_t *dev_priv = dev->dev_private;
29007 drm_radeon_getparam_t *param = data;
29008 - int value;
29009 + int value = 0;
29010
29011 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29012
29013 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
29014 index 0b5468b..9c4b308 100644
29015 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
29016 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
29017 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29018 }
29019 if (unlikely(ttm_vm_ops == NULL)) {
29020 ttm_vm_ops = vma->vm_ops;
29021 - radeon_ttm_vm_ops = *ttm_vm_ops;
29022 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29023 + pax_open_kernel();
29024 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
29025 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29026 + pax_close_kernel();
29027 }
29028 vma->vm_ops = &radeon_ttm_vm_ops;
29029 return 0;
29030 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
29031 index a9049ed..501f284 100644
29032 --- a/drivers/gpu/drm/radeon/rs690.c
29033 +++ b/drivers/gpu/drm/radeon/rs690.c
29034 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
29035 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29036 rdev->pm.sideport_bandwidth.full)
29037 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29038 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
29039 + read_delay_latency.full = dfixed_const(800 * 1000);
29040 read_delay_latency.full = dfixed_div(read_delay_latency,
29041 rdev->pm.igp_sideport_mclk);
29042 + a.full = dfixed_const(370);
29043 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
29044 } else {
29045 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29046 rdev->pm.k8_bandwidth.full)
29047 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29048 index 727e93d..1565650 100644
29049 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
29050 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29051 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
29052 static int ttm_pool_mm_shrink(struct shrinker *shrink,
29053 struct shrink_control *sc)
29054 {
29055 - static atomic_t start_pool = ATOMIC_INIT(0);
29056 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
29057 unsigned i;
29058 - unsigned pool_offset = atomic_add_return(1, &start_pool);
29059 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
29060 struct ttm_page_pool *pool;
29061 int shrink_pages = sc->nr_to_scan;
29062
29063 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
29064 index 9cf87d9..2000b7d 100644
29065 --- a/drivers/gpu/drm/via/via_drv.h
29066 +++ b/drivers/gpu/drm/via/via_drv.h
29067 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29068 typedef uint32_t maskarray_t[5];
29069
29070 typedef struct drm_via_irq {
29071 - atomic_t irq_received;
29072 + atomic_unchecked_t irq_received;
29073 uint32_t pending_mask;
29074 uint32_t enable_mask;
29075 wait_queue_head_t irq_queue;
29076 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29077 struct timeval last_vblank;
29078 int last_vblank_valid;
29079 unsigned usec_per_vblank;
29080 - atomic_t vbl_received;
29081 + atomic_unchecked_t vbl_received;
29082 drm_via_state_t hc_state;
29083 char pci_buf[VIA_PCI_BUF_SIZE];
29084 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29085 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
29086 index d391f48..10c8ca3 100644
29087 --- a/drivers/gpu/drm/via/via_irq.c
29088 +++ b/drivers/gpu/drm/via/via_irq.c
29089 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
29090 if (crtc != 0)
29091 return 0;
29092
29093 - return atomic_read(&dev_priv->vbl_received);
29094 + return atomic_read_unchecked(&dev_priv->vbl_received);
29095 }
29096
29097 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29098 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29099
29100 status = VIA_READ(VIA_REG_INTERRUPT);
29101 if (status & VIA_IRQ_VBLANK_PENDING) {
29102 - atomic_inc(&dev_priv->vbl_received);
29103 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29104 + atomic_inc_unchecked(&dev_priv->vbl_received);
29105 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29106 do_gettimeofday(&cur_vblank);
29107 if (dev_priv->last_vblank_valid) {
29108 dev_priv->usec_per_vblank =
29109 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29110 dev_priv->last_vblank = cur_vblank;
29111 dev_priv->last_vblank_valid = 1;
29112 }
29113 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29114 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29115 DRM_DEBUG("US per vblank is: %u\n",
29116 dev_priv->usec_per_vblank);
29117 }
29118 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29119
29120 for (i = 0; i < dev_priv->num_irqs; ++i) {
29121 if (status & cur_irq->pending_mask) {
29122 - atomic_inc(&cur_irq->irq_received);
29123 + atomic_inc_unchecked(&cur_irq->irq_received);
29124 DRM_WAKEUP(&cur_irq->irq_queue);
29125 handled = 1;
29126 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
29127 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
29128 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29129 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29130 masks[irq][4]));
29131 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29132 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29133 } else {
29134 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29135 (((cur_irq_sequence =
29136 - atomic_read(&cur_irq->irq_received)) -
29137 + atomic_read_unchecked(&cur_irq->irq_received)) -
29138 *sequence) <= (1 << 23)));
29139 }
29140 *sequence = cur_irq_sequence;
29141 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
29142 }
29143
29144 for (i = 0; i < dev_priv->num_irqs; ++i) {
29145 - atomic_set(&cur_irq->irq_received, 0);
29146 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29147 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29148 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29149 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29150 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
29151 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29152 case VIA_IRQ_RELATIVE:
29153 irqwait->request.sequence +=
29154 - atomic_read(&cur_irq->irq_received);
29155 + atomic_read_unchecked(&cur_irq->irq_received);
29156 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29157 case VIA_IRQ_ABSOLUTE:
29158 break;
29159 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29160 index dc27970..f18b008 100644
29161 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29162 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29163 @@ -260,7 +260,7 @@ struct vmw_private {
29164 * Fencing and IRQs.
29165 */
29166
29167 - atomic_t marker_seq;
29168 + atomic_unchecked_t marker_seq;
29169 wait_queue_head_t fence_queue;
29170 wait_queue_head_t fifo_queue;
29171 int fence_queue_waiters; /* Protected by hw_mutex */
29172 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29173 index a0c2f12..68ae6cb 100644
29174 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29175 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29176 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29177 (unsigned int) min,
29178 (unsigned int) fifo->capabilities);
29179
29180 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29181 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29182 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
29183 vmw_marker_queue_init(&fifo->marker_queue);
29184 return vmw_fifo_send_fence(dev_priv, &dummy);
29185 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29186 if (reserveable)
29187 iowrite32(bytes, fifo_mem +
29188 SVGA_FIFO_RESERVED);
29189 - return fifo_mem + (next_cmd >> 2);
29190 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29191 } else {
29192 need_bounce = true;
29193 }
29194 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29195
29196 fm = vmw_fifo_reserve(dev_priv, bytes);
29197 if (unlikely(fm == NULL)) {
29198 - *seqno = atomic_read(&dev_priv->marker_seq);
29199 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29200 ret = -ENOMEM;
29201 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29202 false, 3*HZ);
29203 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29204 }
29205
29206 do {
29207 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29208 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29209 } while (*seqno == 0);
29210
29211 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29212 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29213 index cabc95f..14b3d77 100644
29214 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29215 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29216 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29217 * emitted. Then the fence is stale and signaled.
29218 */
29219
29220 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29221 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29222 > VMW_FENCE_WRAP);
29223
29224 return ret;
29225 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29226
29227 if (fifo_idle)
29228 down_read(&fifo_state->rwsem);
29229 - signal_seq = atomic_read(&dev_priv->marker_seq);
29230 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29231 ret = 0;
29232
29233 for (;;) {
29234 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29235 index 8a8725c..afed796 100644
29236 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29237 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29238 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29239 while (!vmw_lag_lt(queue, us)) {
29240 spin_lock(&queue->lock);
29241 if (list_empty(&queue->head))
29242 - seqno = atomic_read(&dev_priv->marker_seq);
29243 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29244 else {
29245 marker = list_first_entry(&queue->head,
29246 struct vmw_marker, head);
29247 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29248 index bb656d8..4169fca 100644
29249 --- a/drivers/hid/hid-core.c
29250 +++ b/drivers/hid/hid-core.c
29251 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29252
29253 int hid_add_device(struct hid_device *hdev)
29254 {
29255 - static atomic_t id = ATOMIC_INIT(0);
29256 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29257 int ret;
29258
29259 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29260 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29261 /* XXX hack, any other cleaner solution after the driver core
29262 * is converted to allow more than 20 bytes as the device name? */
29263 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29264 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29265 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29266
29267 hid_debug_register(hdev, dev_name(&hdev->dev));
29268 ret = device_add(&hdev->dev);
29269 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29270 index 4ef02b2..8a96831 100644
29271 --- a/drivers/hid/usbhid/hiddev.c
29272 +++ b/drivers/hid/usbhid/hiddev.c
29273 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29274 break;
29275
29276 case HIDIOCAPPLICATION:
29277 - if (arg < 0 || arg >= hid->maxapplication)
29278 + if (arg >= hid->maxapplication)
29279 break;
29280
29281 for (i = 0; i < hid->maxcollection; i++)
29282 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29283 index 4065374..10ed7dc 100644
29284 --- a/drivers/hv/channel.c
29285 +++ b/drivers/hv/channel.c
29286 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29287 int ret = 0;
29288 int t;
29289
29290 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29291 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29292 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29293 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29294
29295 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29296 if (ret)
29297 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29298 index 0fb100e..baf87e5 100644
29299 --- a/drivers/hv/hv.c
29300 +++ b/drivers/hv/hv.c
29301 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29302 u64 output_address = (output) ? virt_to_phys(output) : 0;
29303 u32 output_address_hi = output_address >> 32;
29304 u32 output_address_lo = output_address & 0xFFFFFFFF;
29305 - void *hypercall_page = hv_context.hypercall_page;
29306 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29307
29308 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29309 "=a"(hv_status_lo) : "d" (control_hi),
29310 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29311 index 0aee112..b72d21f 100644
29312 --- a/drivers/hv/hyperv_vmbus.h
29313 +++ b/drivers/hv/hyperv_vmbus.h
29314 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29315 struct vmbus_connection {
29316 enum vmbus_connect_state conn_state;
29317
29318 - atomic_t next_gpadl_handle;
29319 + atomic_unchecked_t next_gpadl_handle;
29320
29321 /*
29322 * Represents channel interrupts. Each bit position represents a
29323 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29324 index d2d0a2a..90b8f4d 100644
29325 --- a/drivers/hv/vmbus_drv.c
29326 +++ b/drivers/hv/vmbus_drv.c
29327 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29328 {
29329 int ret = 0;
29330
29331 - static atomic_t device_num = ATOMIC_INIT(0);
29332 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29333
29334 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29335 - atomic_inc_return(&device_num));
29336 + atomic_inc_return_unchecked(&device_num));
29337
29338 child_device_obj->device.bus = &hv_bus;
29339 child_device_obj->device.parent = &hv_acpi_dev->dev;
29340 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29341 index 66f6729..2d6de0a 100644
29342 --- a/drivers/hwmon/acpi_power_meter.c
29343 +++ b/drivers/hwmon/acpi_power_meter.c
29344 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29345 return res;
29346
29347 temp /= 1000;
29348 - if (temp < 0)
29349 - return -EINVAL;
29350
29351 mutex_lock(&resource->lock);
29352 resource->trip[attr->index - 7] = temp;
29353 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29354 index 5357925..6cf0418 100644
29355 --- a/drivers/hwmon/sht15.c
29356 +++ b/drivers/hwmon/sht15.c
29357 @@ -166,7 +166,7 @@ struct sht15_data {
29358 int supply_uV;
29359 bool supply_uV_valid;
29360 struct work_struct update_supply_work;
29361 - atomic_t interrupt_handled;
29362 + atomic_unchecked_t interrupt_handled;
29363 };
29364
29365 /**
29366 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29367 return ret;
29368
29369 gpio_direction_input(data->pdata->gpio_data);
29370 - atomic_set(&data->interrupt_handled, 0);
29371 + atomic_set_unchecked(&data->interrupt_handled, 0);
29372
29373 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29374 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29375 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29376 /* Only relevant if the interrupt hasn't occurred. */
29377 - if (!atomic_read(&data->interrupt_handled))
29378 + if (!atomic_read_unchecked(&data->interrupt_handled))
29379 schedule_work(&data->read_work);
29380 }
29381 ret = wait_event_timeout(data->wait_queue,
29382 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29383
29384 /* First disable the interrupt */
29385 disable_irq_nosync(irq);
29386 - atomic_inc(&data->interrupt_handled);
29387 + atomic_inc_unchecked(&data->interrupt_handled);
29388 /* Then schedule a reading work struct */
29389 if (data->state != SHT15_READING_NOTHING)
29390 schedule_work(&data->read_work);
29391 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29392 * If not, then start the interrupt again - care here as could
29393 * have gone low in meantime so verify it hasn't!
29394 */
29395 - atomic_set(&data->interrupt_handled, 0);
29396 + atomic_set_unchecked(&data->interrupt_handled, 0);
29397 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29398 /* If still not occurred or another handler has been scheduled */
29399 if (gpio_get_value(data->pdata->gpio_data)
29400 - || atomic_read(&data->interrupt_handled))
29401 + || atomic_read_unchecked(&data->interrupt_handled))
29402 return;
29403 }
29404
29405 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29406 index 378fcb5..5e91fa8 100644
29407 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29408 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29409 @@ -43,7 +43,7 @@
29410 extern struct i2c_adapter amd756_smbus;
29411
29412 static struct i2c_adapter *s4882_adapter;
29413 -static struct i2c_algorithm *s4882_algo;
29414 +static i2c_algorithm_no_const *s4882_algo;
29415
29416 /* Wrapper access functions for multiplexed SMBus */
29417 static DEFINE_MUTEX(amd756_lock);
29418 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29419 index 29015eb..af2d8e9 100644
29420 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29421 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29422 @@ -41,7 +41,7 @@
29423 extern struct i2c_adapter *nforce2_smbus;
29424
29425 static struct i2c_adapter *s4985_adapter;
29426 -static struct i2c_algorithm *s4985_algo;
29427 +static i2c_algorithm_no_const *s4985_algo;
29428
29429 /* Wrapper access functions for multiplexed SMBus */
29430 static DEFINE_MUTEX(nforce2_lock);
29431 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29432 index d7a4833..7fae376 100644
29433 --- a/drivers/i2c/i2c-mux.c
29434 +++ b/drivers/i2c/i2c-mux.c
29435 @@ -28,7 +28,7 @@
29436 /* multiplexer per channel data */
29437 struct i2c_mux_priv {
29438 struct i2c_adapter adap;
29439 - struct i2c_algorithm algo;
29440 + i2c_algorithm_no_const algo;
29441
29442 struct i2c_adapter *parent;
29443 void *mux_dev; /* the mux chip/device */
29444 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29445 index 57d00ca..0145194 100644
29446 --- a/drivers/ide/aec62xx.c
29447 +++ b/drivers/ide/aec62xx.c
29448 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29449 .cable_detect = atp86x_cable_detect,
29450 };
29451
29452 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29453 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29454 { /* 0: AEC6210 */
29455 .name = DRV_NAME,
29456 .init_chipset = init_chipset_aec62xx,
29457 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29458 index 2c8016a..911a27c 100644
29459 --- a/drivers/ide/alim15x3.c
29460 +++ b/drivers/ide/alim15x3.c
29461 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29462 .dma_sff_read_status = ide_dma_sff_read_status,
29463 };
29464
29465 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29466 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29467 .name = DRV_NAME,
29468 .init_chipset = init_chipset_ali15x3,
29469 .init_hwif = init_hwif_ali15x3,
29470 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29471 index 3747b25..56fc995 100644
29472 --- a/drivers/ide/amd74xx.c
29473 +++ b/drivers/ide/amd74xx.c
29474 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29475 .udma_mask = udma, \
29476 }
29477
29478 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29479 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29480 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29481 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29482 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29483 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29484 index 15f0ead..cb43480 100644
29485 --- a/drivers/ide/atiixp.c
29486 +++ b/drivers/ide/atiixp.c
29487 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29488 .cable_detect = atiixp_cable_detect,
29489 };
29490
29491 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29492 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29493 { /* 0: IXP200/300/400/700 */
29494 .name = DRV_NAME,
29495 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29496 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29497 index 5f80312..d1fc438 100644
29498 --- a/drivers/ide/cmd64x.c
29499 +++ b/drivers/ide/cmd64x.c
29500 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29501 .dma_sff_read_status = ide_dma_sff_read_status,
29502 };
29503
29504 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29505 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29506 { /* 0: CMD643 */
29507 .name = DRV_NAME,
29508 .init_chipset = init_chipset_cmd64x,
29509 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29510 index 2c1e5f7..1444762 100644
29511 --- a/drivers/ide/cs5520.c
29512 +++ b/drivers/ide/cs5520.c
29513 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29514 .set_dma_mode = cs5520_set_dma_mode,
29515 };
29516
29517 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29518 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29519 .name = DRV_NAME,
29520 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29521 .port_ops = &cs5520_port_ops,
29522 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29523 index 4dc4eb9..49b40ad 100644
29524 --- a/drivers/ide/cs5530.c
29525 +++ b/drivers/ide/cs5530.c
29526 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29527 .udma_filter = cs5530_udma_filter,
29528 };
29529
29530 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29531 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29532 .name = DRV_NAME,
29533 .init_chipset = init_chipset_cs5530,
29534 .init_hwif = init_hwif_cs5530,
29535 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29536 index 5059faf..18d4c85 100644
29537 --- a/drivers/ide/cs5535.c
29538 +++ b/drivers/ide/cs5535.c
29539 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29540 .cable_detect = cs5535_cable_detect,
29541 };
29542
29543 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29544 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29545 .name = DRV_NAME,
29546 .port_ops = &cs5535_port_ops,
29547 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29548 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29549 index 847553f..3ffb49d 100644
29550 --- a/drivers/ide/cy82c693.c
29551 +++ b/drivers/ide/cy82c693.c
29552 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29553 .set_dma_mode = cy82c693_set_dma_mode,
29554 };
29555
29556 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29557 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29558 .name = DRV_NAME,
29559 .init_iops = init_iops_cy82c693,
29560 .port_ops = &cy82c693_port_ops,
29561 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29562 index 58c51cd..4aec3b8 100644
29563 --- a/drivers/ide/hpt366.c
29564 +++ b/drivers/ide/hpt366.c
29565 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29566 }
29567 };
29568
29569 -static const struct hpt_info hpt36x __devinitdata = {
29570 +static const struct hpt_info hpt36x __devinitconst = {
29571 .chip_name = "HPT36x",
29572 .chip_type = HPT36x,
29573 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29574 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29575 .timings = &hpt36x_timings
29576 };
29577
29578 -static const struct hpt_info hpt370 __devinitdata = {
29579 +static const struct hpt_info hpt370 __devinitconst = {
29580 .chip_name = "HPT370",
29581 .chip_type = HPT370,
29582 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29583 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29584 .timings = &hpt37x_timings
29585 };
29586
29587 -static const struct hpt_info hpt370a __devinitdata = {
29588 +static const struct hpt_info hpt370a __devinitconst = {
29589 .chip_name = "HPT370A",
29590 .chip_type = HPT370A,
29591 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29592 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29593 .timings = &hpt37x_timings
29594 };
29595
29596 -static const struct hpt_info hpt374 __devinitdata = {
29597 +static const struct hpt_info hpt374 __devinitconst = {
29598 .chip_name = "HPT374",
29599 .chip_type = HPT374,
29600 .udma_mask = ATA_UDMA5,
29601 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29602 .timings = &hpt37x_timings
29603 };
29604
29605 -static const struct hpt_info hpt372 __devinitdata = {
29606 +static const struct hpt_info hpt372 __devinitconst = {
29607 .chip_name = "HPT372",
29608 .chip_type = HPT372,
29609 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29610 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29611 .timings = &hpt37x_timings
29612 };
29613
29614 -static const struct hpt_info hpt372a __devinitdata = {
29615 +static const struct hpt_info hpt372a __devinitconst = {
29616 .chip_name = "HPT372A",
29617 .chip_type = HPT372A,
29618 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29619 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29620 .timings = &hpt37x_timings
29621 };
29622
29623 -static const struct hpt_info hpt302 __devinitdata = {
29624 +static const struct hpt_info hpt302 __devinitconst = {
29625 .chip_name = "HPT302",
29626 .chip_type = HPT302,
29627 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29628 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29629 .timings = &hpt37x_timings
29630 };
29631
29632 -static const struct hpt_info hpt371 __devinitdata = {
29633 +static const struct hpt_info hpt371 __devinitconst = {
29634 .chip_name = "HPT371",
29635 .chip_type = HPT371,
29636 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29637 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29638 .timings = &hpt37x_timings
29639 };
29640
29641 -static const struct hpt_info hpt372n __devinitdata = {
29642 +static const struct hpt_info hpt372n __devinitconst = {
29643 .chip_name = "HPT372N",
29644 .chip_type = HPT372N,
29645 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29646 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29647 .timings = &hpt37x_timings
29648 };
29649
29650 -static const struct hpt_info hpt302n __devinitdata = {
29651 +static const struct hpt_info hpt302n __devinitconst = {
29652 .chip_name = "HPT302N",
29653 .chip_type = HPT302N,
29654 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29655 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29656 .timings = &hpt37x_timings
29657 };
29658
29659 -static const struct hpt_info hpt371n __devinitdata = {
29660 +static const struct hpt_info hpt371n __devinitconst = {
29661 .chip_name = "HPT371N",
29662 .chip_type = HPT371N,
29663 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29664 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29665 .dma_sff_read_status = ide_dma_sff_read_status,
29666 };
29667
29668 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29669 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29670 { /* 0: HPT36x */
29671 .name = DRV_NAME,
29672 .init_chipset = init_chipset_hpt366,
29673 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29674 index 8126824..55a2798 100644
29675 --- a/drivers/ide/ide-cd.c
29676 +++ b/drivers/ide/ide-cd.c
29677 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29678 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29679 if ((unsigned long)buf & alignment
29680 || blk_rq_bytes(rq) & q->dma_pad_mask
29681 - || object_is_on_stack(buf))
29682 + || object_starts_on_stack(buf))
29683 drive->dma = 0;
29684 }
29685 }
29686 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29687 index a743e68..1cfd674 100644
29688 --- a/drivers/ide/ide-pci-generic.c
29689 +++ b/drivers/ide/ide-pci-generic.c
29690 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29691 .udma_mask = ATA_UDMA6, \
29692 }
29693
29694 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29695 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29696 /* 0: Unknown */
29697 DECLARE_GENERIC_PCI_DEV(0),
29698
29699 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29700 index 560e66d..d5dd180 100644
29701 --- a/drivers/ide/it8172.c
29702 +++ b/drivers/ide/it8172.c
29703 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29704 .set_dma_mode = it8172_set_dma_mode,
29705 };
29706
29707 -static const struct ide_port_info it8172_port_info __devinitdata = {
29708 +static const struct ide_port_info it8172_port_info __devinitconst = {
29709 .name = DRV_NAME,
29710 .port_ops = &it8172_port_ops,
29711 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29712 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29713 index 46816ba..1847aeb 100644
29714 --- a/drivers/ide/it8213.c
29715 +++ b/drivers/ide/it8213.c
29716 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29717 .cable_detect = it8213_cable_detect,
29718 };
29719
29720 -static const struct ide_port_info it8213_chipset __devinitdata = {
29721 +static const struct ide_port_info it8213_chipset __devinitconst = {
29722 .name = DRV_NAME,
29723 .enablebits = { {0x41, 0x80, 0x80} },
29724 .port_ops = &it8213_port_ops,
29725 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29726 index 2e3169f..c5611db 100644
29727 --- a/drivers/ide/it821x.c
29728 +++ b/drivers/ide/it821x.c
29729 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29730 .cable_detect = it821x_cable_detect,
29731 };
29732
29733 -static const struct ide_port_info it821x_chipset __devinitdata = {
29734 +static const struct ide_port_info it821x_chipset __devinitconst = {
29735 .name = DRV_NAME,
29736 .init_chipset = init_chipset_it821x,
29737 .init_hwif = init_hwif_it821x,
29738 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29739 index 74c2c4a..efddd7d 100644
29740 --- a/drivers/ide/jmicron.c
29741 +++ b/drivers/ide/jmicron.c
29742 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29743 .cable_detect = jmicron_cable_detect,
29744 };
29745
29746 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29747 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29748 .name = DRV_NAME,
29749 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29750 .port_ops = &jmicron_port_ops,
29751 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29752 index 95327a2..73f78d8 100644
29753 --- a/drivers/ide/ns87415.c
29754 +++ b/drivers/ide/ns87415.c
29755 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29756 .dma_sff_read_status = superio_dma_sff_read_status,
29757 };
29758
29759 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29760 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29761 .name = DRV_NAME,
29762 .init_hwif = init_hwif_ns87415,
29763 .tp_ops = &ns87415_tp_ops,
29764 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29765 index 1a53a4c..39edc66 100644
29766 --- a/drivers/ide/opti621.c
29767 +++ b/drivers/ide/opti621.c
29768 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29769 .set_pio_mode = opti621_set_pio_mode,
29770 };
29771
29772 -static const struct ide_port_info opti621_chipset __devinitdata = {
29773 +static const struct ide_port_info opti621_chipset __devinitconst = {
29774 .name = DRV_NAME,
29775 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29776 .port_ops = &opti621_port_ops,
29777 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29778 index 9546fe2..2e5ceb6 100644
29779 --- a/drivers/ide/pdc202xx_new.c
29780 +++ b/drivers/ide/pdc202xx_new.c
29781 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29782 .udma_mask = udma, \
29783 }
29784
29785 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29786 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29787 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29788 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29789 };
29790 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29791 index 3a35ec6..5634510 100644
29792 --- a/drivers/ide/pdc202xx_old.c
29793 +++ b/drivers/ide/pdc202xx_old.c
29794 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29795 .max_sectors = sectors, \
29796 }
29797
29798 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29799 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29800 { /* 0: PDC20246 */
29801 .name = DRV_NAME,
29802 .init_chipset = init_chipset_pdc202xx,
29803 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29804 index 1892e81..fe0fd60 100644
29805 --- a/drivers/ide/piix.c
29806 +++ b/drivers/ide/piix.c
29807 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29808 .udma_mask = udma, \
29809 }
29810
29811 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29812 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29813 /* 0: MPIIX */
29814 { /*
29815 * MPIIX actually has only a single IDE channel mapped to
29816 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29817 index a6414a8..c04173e 100644
29818 --- a/drivers/ide/rz1000.c
29819 +++ b/drivers/ide/rz1000.c
29820 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29821 }
29822 }
29823
29824 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29825 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29826 .name = DRV_NAME,
29827 .host_flags = IDE_HFLAG_NO_DMA,
29828 };
29829 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29830 index 356b9b5..d4758eb 100644
29831 --- a/drivers/ide/sc1200.c
29832 +++ b/drivers/ide/sc1200.c
29833 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29834 .dma_sff_read_status = ide_dma_sff_read_status,
29835 };
29836
29837 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29838 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29839 .name = DRV_NAME,
29840 .port_ops = &sc1200_port_ops,
29841 .dma_ops = &sc1200_dma_ops,
29842 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29843 index b7f5b0c..9701038 100644
29844 --- a/drivers/ide/scc_pata.c
29845 +++ b/drivers/ide/scc_pata.c
29846 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29847 .dma_sff_read_status = scc_dma_sff_read_status,
29848 };
29849
29850 -static const struct ide_port_info scc_chipset __devinitdata = {
29851 +static const struct ide_port_info scc_chipset __devinitconst = {
29852 .name = "sccIDE",
29853 .init_iops = init_iops_scc,
29854 .init_dma = scc_init_dma,
29855 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29856 index 35fb8da..24d72ef 100644
29857 --- a/drivers/ide/serverworks.c
29858 +++ b/drivers/ide/serverworks.c
29859 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29860 .cable_detect = svwks_cable_detect,
29861 };
29862
29863 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29864 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29865 { /* 0: OSB4 */
29866 .name = DRV_NAME,
29867 .init_chipset = init_chipset_svwks,
29868 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29869 index ddeda44..46f7e30 100644
29870 --- a/drivers/ide/siimage.c
29871 +++ b/drivers/ide/siimage.c
29872 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29873 .udma_mask = ATA_UDMA6, \
29874 }
29875
29876 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29877 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29878 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29879 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29880 };
29881 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29882 index 4a00225..09e61b4 100644
29883 --- a/drivers/ide/sis5513.c
29884 +++ b/drivers/ide/sis5513.c
29885 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29886 .cable_detect = sis_cable_detect,
29887 };
29888
29889 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29890 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29891 .name = DRV_NAME,
29892 .init_chipset = init_chipset_sis5513,
29893 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29894 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29895 index f21dc2a..d051cd2 100644
29896 --- a/drivers/ide/sl82c105.c
29897 +++ b/drivers/ide/sl82c105.c
29898 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29899 .dma_sff_read_status = ide_dma_sff_read_status,
29900 };
29901
29902 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29903 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29904 .name = DRV_NAME,
29905 .init_chipset = init_chipset_sl82c105,
29906 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29907 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29908 index 864ffe0..863a5e9 100644
29909 --- a/drivers/ide/slc90e66.c
29910 +++ b/drivers/ide/slc90e66.c
29911 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29912 .cable_detect = slc90e66_cable_detect,
29913 };
29914
29915 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29916 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29917 .name = DRV_NAME,
29918 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29919 .port_ops = &slc90e66_port_ops,
29920 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29921 index 4799d5c..1794678 100644
29922 --- a/drivers/ide/tc86c001.c
29923 +++ b/drivers/ide/tc86c001.c
29924 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29925 .dma_sff_read_status = ide_dma_sff_read_status,
29926 };
29927
29928 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29929 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29930 .name = DRV_NAME,
29931 .init_hwif = init_hwif_tc86c001,
29932 .port_ops = &tc86c001_port_ops,
29933 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29934 index 281c914..55ce1b8 100644
29935 --- a/drivers/ide/triflex.c
29936 +++ b/drivers/ide/triflex.c
29937 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29938 .set_dma_mode = triflex_set_mode,
29939 };
29940
29941 -static const struct ide_port_info triflex_device __devinitdata = {
29942 +static const struct ide_port_info triflex_device __devinitconst = {
29943 .name = DRV_NAME,
29944 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29945 .port_ops = &triflex_port_ops,
29946 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29947 index 4b42ca0..e494a98 100644
29948 --- a/drivers/ide/trm290.c
29949 +++ b/drivers/ide/trm290.c
29950 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29951 .dma_check = trm290_dma_check,
29952 };
29953
29954 -static const struct ide_port_info trm290_chipset __devinitdata = {
29955 +static const struct ide_port_info trm290_chipset __devinitconst = {
29956 .name = DRV_NAME,
29957 .init_hwif = init_hwif_trm290,
29958 .tp_ops = &trm290_tp_ops,
29959 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29960 index f46f49c..eb77678 100644
29961 --- a/drivers/ide/via82cxxx.c
29962 +++ b/drivers/ide/via82cxxx.c
29963 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29964 .cable_detect = via82cxxx_cable_detect,
29965 };
29966
29967 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29968 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29969 .name = DRV_NAME,
29970 .init_chipset = init_chipset_via82cxxx,
29971 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29972 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29973 index eb0e2cc..14241c7 100644
29974 --- a/drivers/ieee802154/fakehard.c
29975 +++ b/drivers/ieee802154/fakehard.c
29976 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29977 phy->transmit_power = 0xbf;
29978
29979 dev->netdev_ops = &fake_ops;
29980 - dev->ml_priv = &fake_mlme;
29981 + dev->ml_priv = (void *)&fake_mlme;
29982
29983 priv = netdev_priv(dev);
29984 priv->phy = phy;
29985 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29986 index 8b72f39..55df4c8 100644
29987 --- a/drivers/infiniband/core/cm.c
29988 +++ b/drivers/infiniband/core/cm.c
29989 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29990
29991 struct cm_counter_group {
29992 struct kobject obj;
29993 - atomic_long_t counter[CM_ATTR_COUNT];
29994 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29995 };
29996
29997 struct cm_counter_attribute {
29998 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29999 struct ib_mad_send_buf *msg = NULL;
30000 int ret;
30001
30002 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30003 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30004 counter[CM_REQ_COUNTER]);
30005
30006 /* Quick state check to discard duplicate REQs. */
30007 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
30008 if (!cm_id_priv)
30009 return;
30010
30011 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30012 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30013 counter[CM_REP_COUNTER]);
30014 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30015 if (ret)
30016 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
30017 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30018 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30019 spin_unlock_irq(&cm_id_priv->lock);
30020 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30021 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30022 counter[CM_RTU_COUNTER]);
30023 goto out;
30024 }
30025 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
30026 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30027 dreq_msg->local_comm_id);
30028 if (!cm_id_priv) {
30029 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30030 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30031 counter[CM_DREQ_COUNTER]);
30032 cm_issue_drep(work->port, work->mad_recv_wc);
30033 return -EINVAL;
30034 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
30035 case IB_CM_MRA_REP_RCVD:
30036 break;
30037 case IB_CM_TIMEWAIT:
30038 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30039 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30040 counter[CM_DREQ_COUNTER]);
30041 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30042 goto unlock;
30043 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
30044 cm_free_msg(msg);
30045 goto deref;
30046 case IB_CM_DREQ_RCVD:
30047 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30048 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30049 counter[CM_DREQ_COUNTER]);
30050 goto unlock;
30051 default:
30052 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
30053 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30054 cm_id_priv->msg, timeout)) {
30055 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30056 - atomic_long_inc(&work->port->
30057 + atomic_long_inc_unchecked(&work->port->
30058 counter_group[CM_RECV_DUPLICATES].
30059 counter[CM_MRA_COUNTER]);
30060 goto out;
30061 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
30062 break;
30063 case IB_CM_MRA_REQ_RCVD:
30064 case IB_CM_MRA_REP_RCVD:
30065 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30066 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30067 counter[CM_MRA_COUNTER]);
30068 /* fall through */
30069 default:
30070 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
30071 case IB_CM_LAP_IDLE:
30072 break;
30073 case IB_CM_MRA_LAP_SENT:
30074 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30075 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30076 counter[CM_LAP_COUNTER]);
30077 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30078 goto unlock;
30079 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
30080 cm_free_msg(msg);
30081 goto deref;
30082 case IB_CM_LAP_RCVD:
30083 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30084 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30085 counter[CM_LAP_COUNTER]);
30086 goto unlock;
30087 default:
30088 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
30089 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30090 if (cur_cm_id_priv) {
30091 spin_unlock_irq(&cm.lock);
30092 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30093 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30094 counter[CM_SIDR_REQ_COUNTER]);
30095 goto out; /* Duplicate message. */
30096 }
30097 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
30098 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30099 msg->retries = 1;
30100
30101 - atomic_long_add(1 + msg->retries,
30102 + atomic_long_add_unchecked(1 + msg->retries,
30103 &port->counter_group[CM_XMIT].counter[attr_index]);
30104 if (msg->retries)
30105 - atomic_long_add(msg->retries,
30106 + atomic_long_add_unchecked(msg->retries,
30107 &port->counter_group[CM_XMIT_RETRIES].
30108 counter[attr_index]);
30109
30110 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
30111 }
30112
30113 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30114 - atomic_long_inc(&port->counter_group[CM_RECV].
30115 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30116 counter[attr_id - CM_ATTR_ID_OFFSET]);
30117
30118 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30119 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
30120 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30121
30122 return sprintf(buf, "%ld\n",
30123 - atomic_long_read(&group->counter[cm_attr->index]));
30124 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30125 }
30126
30127 static const struct sysfs_ops cm_counter_ops = {
30128 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
30129 index 176c8f9..2627b62 100644
30130 --- a/drivers/infiniband/core/fmr_pool.c
30131 +++ b/drivers/infiniband/core/fmr_pool.c
30132 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
30133
30134 struct task_struct *thread;
30135
30136 - atomic_t req_ser;
30137 - atomic_t flush_ser;
30138 + atomic_unchecked_t req_ser;
30139 + atomic_unchecked_t flush_ser;
30140
30141 wait_queue_head_t force_wait;
30142 };
30143 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30144 struct ib_fmr_pool *pool = pool_ptr;
30145
30146 do {
30147 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30148 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30149 ib_fmr_batch_release(pool);
30150
30151 - atomic_inc(&pool->flush_ser);
30152 + atomic_inc_unchecked(&pool->flush_ser);
30153 wake_up_interruptible(&pool->force_wait);
30154
30155 if (pool->flush_function)
30156 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30157 }
30158
30159 set_current_state(TASK_INTERRUPTIBLE);
30160 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30161 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30162 !kthread_should_stop())
30163 schedule();
30164 __set_current_state(TASK_RUNNING);
30165 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30166 pool->dirty_watermark = params->dirty_watermark;
30167 pool->dirty_len = 0;
30168 spin_lock_init(&pool->pool_lock);
30169 - atomic_set(&pool->req_ser, 0);
30170 - atomic_set(&pool->flush_ser, 0);
30171 + atomic_set_unchecked(&pool->req_ser, 0);
30172 + atomic_set_unchecked(&pool->flush_ser, 0);
30173 init_waitqueue_head(&pool->force_wait);
30174
30175 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30176 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30177 }
30178 spin_unlock_irq(&pool->pool_lock);
30179
30180 - serial = atomic_inc_return(&pool->req_ser);
30181 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30182 wake_up_process(pool->thread);
30183
30184 if (wait_event_interruptible(pool->force_wait,
30185 - atomic_read(&pool->flush_ser) - serial >= 0))
30186 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30187 return -EINTR;
30188
30189 return 0;
30190 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30191 } else {
30192 list_add_tail(&fmr->list, &pool->dirty_list);
30193 if (++pool->dirty_len >= pool->dirty_watermark) {
30194 - atomic_inc(&pool->req_ser);
30195 + atomic_inc_unchecked(&pool->req_ser);
30196 wake_up_process(pool->thread);
30197 }
30198 }
30199 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30200 index 40c8353..946b0e4 100644
30201 --- a/drivers/infiniband/hw/cxgb4/mem.c
30202 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30203 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30204 int err;
30205 struct fw_ri_tpte tpt;
30206 u32 stag_idx;
30207 - static atomic_t key;
30208 + static atomic_unchecked_t key;
30209
30210 if (c4iw_fatal_error(rdev))
30211 return -EIO;
30212 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30213 &rdev->resource.tpt_fifo_lock);
30214 if (!stag_idx)
30215 return -ENOMEM;
30216 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30217 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30218 }
30219 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30220 __func__, stag_state, type, pdid, stag_idx);
30221 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30222 index 79b3dbc..96e5fcc 100644
30223 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30224 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30225 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30226 struct ib_atomic_eth *ateth;
30227 struct ipath_ack_entry *e;
30228 u64 vaddr;
30229 - atomic64_t *maddr;
30230 + atomic64_unchecked_t *maddr;
30231 u64 sdata;
30232 u32 rkey;
30233 u8 next;
30234 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30235 IB_ACCESS_REMOTE_ATOMIC)))
30236 goto nack_acc_unlck;
30237 /* Perform atomic OP and save result. */
30238 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30239 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30240 sdata = be64_to_cpu(ateth->swap_data);
30241 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30242 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30243 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30244 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30245 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30246 be64_to_cpu(ateth->compare_data),
30247 sdata);
30248 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30249 index 1f95bba..9530f87 100644
30250 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30251 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30252 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30253 unsigned long flags;
30254 struct ib_wc wc;
30255 u64 sdata;
30256 - atomic64_t *maddr;
30257 + atomic64_unchecked_t *maddr;
30258 enum ib_wc_status send_status;
30259
30260 /*
30261 @@ -382,11 +382,11 @@ again:
30262 IB_ACCESS_REMOTE_ATOMIC)))
30263 goto acc_err;
30264 /* Perform atomic OP and save result. */
30265 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30266 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30267 sdata = wqe->wr.wr.atomic.compare_add;
30268 *(u64 *) sqp->s_sge.sge.vaddr =
30269 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30270 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30271 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30272 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30273 sdata, wqe->wr.wr.atomic.swap);
30274 goto send_comp;
30275 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30276 index 5965b3d..16817fb 100644
30277 --- a/drivers/infiniband/hw/nes/nes.c
30278 +++ b/drivers/infiniband/hw/nes/nes.c
30279 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30280 LIST_HEAD(nes_adapter_list);
30281 static LIST_HEAD(nes_dev_list);
30282
30283 -atomic_t qps_destroyed;
30284 +atomic_unchecked_t qps_destroyed;
30285
30286 static unsigned int ee_flsh_adapter;
30287 static unsigned int sysfs_nonidx_addr;
30288 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30289 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30290 struct nes_adapter *nesadapter = nesdev->nesadapter;
30291
30292 - atomic_inc(&qps_destroyed);
30293 + atomic_inc_unchecked(&qps_destroyed);
30294
30295 /* Free the control structures */
30296
30297 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30298 index 568b4f1..5ea3eff 100644
30299 --- a/drivers/infiniband/hw/nes/nes.h
30300 +++ b/drivers/infiniband/hw/nes/nes.h
30301 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30302 extern unsigned int wqm_quanta;
30303 extern struct list_head nes_adapter_list;
30304
30305 -extern atomic_t cm_connects;
30306 -extern atomic_t cm_accepts;
30307 -extern atomic_t cm_disconnects;
30308 -extern atomic_t cm_closes;
30309 -extern atomic_t cm_connecteds;
30310 -extern atomic_t cm_connect_reqs;
30311 -extern atomic_t cm_rejects;
30312 -extern atomic_t mod_qp_timouts;
30313 -extern atomic_t qps_created;
30314 -extern atomic_t qps_destroyed;
30315 -extern atomic_t sw_qps_destroyed;
30316 +extern atomic_unchecked_t cm_connects;
30317 +extern atomic_unchecked_t cm_accepts;
30318 +extern atomic_unchecked_t cm_disconnects;
30319 +extern atomic_unchecked_t cm_closes;
30320 +extern atomic_unchecked_t cm_connecteds;
30321 +extern atomic_unchecked_t cm_connect_reqs;
30322 +extern atomic_unchecked_t cm_rejects;
30323 +extern atomic_unchecked_t mod_qp_timouts;
30324 +extern atomic_unchecked_t qps_created;
30325 +extern atomic_unchecked_t qps_destroyed;
30326 +extern atomic_unchecked_t sw_qps_destroyed;
30327 extern u32 mh_detected;
30328 extern u32 mh_pauses_sent;
30329 extern u32 cm_packets_sent;
30330 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30331 extern u32 cm_packets_received;
30332 extern u32 cm_packets_dropped;
30333 extern u32 cm_packets_retrans;
30334 -extern atomic_t cm_listens_created;
30335 -extern atomic_t cm_listens_destroyed;
30336 +extern atomic_unchecked_t cm_listens_created;
30337 +extern atomic_unchecked_t cm_listens_destroyed;
30338 extern u32 cm_backlog_drops;
30339 -extern atomic_t cm_loopbacks;
30340 -extern atomic_t cm_nodes_created;
30341 -extern atomic_t cm_nodes_destroyed;
30342 -extern atomic_t cm_accel_dropped_pkts;
30343 -extern atomic_t cm_resets_recvd;
30344 -extern atomic_t pau_qps_created;
30345 -extern atomic_t pau_qps_destroyed;
30346 +extern atomic_unchecked_t cm_loopbacks;
30347 +extern atomic_unchecked_t cm_nodes_created;
30348 +extern atomic_unchecked_t cm_nodes_destroyed;
30349 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30350 +extern atomic_unchecked_t cm_resets_recvd;
30351 +extern atomic_unchecked_t pau_qps_created;
30352 +extern atomic_unchecked_t pau_qps_destroyed;
30353
30354 extern u32 int_mod_timer_init;
30355 extern u32 int_mod_cq_depth_256;
30356 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30357 index 0a52d72..0642f36 100644
30358 --- a/drivers/infiniband/hw/nes/nes_cm.c
30359 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30360 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30361 u32 cm_packets_retrans;
30362 u32 cm_packets_created;
30363 u32 cm_packets_received;
30364 -atomic_t cm_listens_created;
30365 -atomic_t cm_listens_destroyed;
30366 +atomic_unchecked_t cm_listens_created;
30367 +atomic_unchecked_t cm_listens_destroyed;
30368 u32 cm_backlog_drops;
30369 -atomic_t cm_loopbacks;
30370 -atomic_t cm_nodes_created;
30371 -atomic_t cm_nodes_destroyed;
30372 -atomic_t cm_accel_dropped_pkts;
30373 -atomic_t cm_resets_recvd;
30374 +atomic_unchecked_t cm_loopbacks;
30375 +atomic_unchecked_t cm_nodes_created;
30376 +atomic_unchecked_t cm_nodes_destroyed;
30377 +atomic_unchecked_t cm_accel_dropped_pkts;
30378 +atomic_unchecked_t cm_resets_recvd;
30379
30380 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30381 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30382 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30383
30384 static struct nes_cm_core *g_cm_core;
30385
30386 -atomic_t cm_connects;
30387 -atomic_t cm_accepts;
30388 -atomic_t cm_disconnects;
30389 -atomic_t cm_closes;
30390 -atomic_t cm_connecteds;
30391 -atomic_t cm_connect_reqs;
30392 -atomic_t cm_rejects;
30393 +atomic_unchecked_t cm_connects;
30394 +atomic_unchecked_t cm_accepts;
30395 +atomic_unchecked_t cm_disconnects;
30396 +atomic_unchecked_t cm_closes;
30397 +atomic_unchecked_t cm_connecteds;
30398 +atomic_unchecked_t cm_connect_reqs;
30399 +atomic_unchecked_t cm_rejects;
30400
30401 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30402 {
30403 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30404 kfree(listener);
30405 listener = NULL;
30406 ret = 0;
30407 - atomic_inc(&cm_listens_destroyed);
30408 + atomic_inc_unchecked(&cm_listens_destroyed);
30409 } else {
30410 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30411 }
30412 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30413 cm_node->rem_mac);
30414
30415 add_hte_node(cm_core, cm_node);
30416 - atomic_inc(&cm_nodes_created);
30417 + atomic_inc_unchecked(&cm_nodes_created);
30418
30419 return cm_node;
30420 }
30421 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30422 }
30423
30424 atomic_dec(&cm_core->node_cnt);
30425 - atomic_inc(&cm_nodes_destroyed);
30426 + atomic_inc_unchecked(&cm_nodes_destroyed);
30427 nesqp = cm_node->nesqp;
30428 if (nesqp) {
30429 nesqp->cm_node = NULL;
30430 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30431
30432 static void drop_packet(struct sk_buff *skb)
30433 {
30434 - atomic_inc(&cm_accel_dropped_pkts);
30435 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30436 dev_kfree_skb_any(skb);
30437 }
30438
30439 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30440 {
30441
30442 int reset = 0; /* whether to send reset in case of err.. */
30443 - atomic_inc(&cm_resets_recvd);
30444 + atomic_inc_unchecked(&cm_resets_recvd);
30445 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30446 " refcnt=%d\n", cm_node, cm_node->state,
30447 atomic_read(&cm_node->ref_count));
30448 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30449 rem_ref_cm_node(cm_node->cm_core, cm_node);
30450 return NULL;
30451 }
30452 - atomic_inc(&cm_loopbacks);
30453 + atomic_inc_unchecked(&cm_loopbacks);
30454 loopbackremotenode->loopbackpartner = cm_node;
30455 loopbackremotenode->tcp_cntxt.rcv_wscale =
30456 NES_CM_DEFAULT_RCV_WND_SCALE;
30457 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30458 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30459 else {
30460 rem_ref_cm_node(cm_core, cm_node);
30461 - atomic_inc(&cm_accel_dropped_pkts);
30462 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30463 dev_kfree_skb_any(skb);
30464 }
30465 break;
30466 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30467
30468 if ((cm_id) && (cm_id->event_handler)) {
30469 if (issue_disconn) {
30470 - atomic_inc(&cm_disconnects);
30471 + atomic_inc_unchecked(&cm_disconnects);
30472 cm_event.event = IW_CM_EVENT_DISCONNECT;
30473 cm_event.status = disconn_status;
30474 cm_event.local_addr = cm_id->local_addr;
30475 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30476 }
30477
30478 if (issue_close) {
30479 - atomic_inc(&cm_closes);
30480 + atomic_inc_unchecked(&cm_closes);
30481 nes_disconnect(nesqp, 1);
30482
30483 cm_id->provider_data = nesqp;
30484 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30485
30486 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30487 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30488 - atomic_inc(&cm_accepts);
30489 + atomic_inc_unchecked(&cm_accepts);
30490
30491 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30492 netdev_refcnt_read(nesvnic->netdev));
30493 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30494 struct nes_cm_core *cm_core;
30495 u8 *start_buff;
30496
30497 - atomic_inc(&cm_rejects);
30498 + atomic_inc_unchecked(&cm_rejects);
30499 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30500 loopback = cm_node->loopbackpartner;
30501 cm_core = cm_node->cm_core;
30502 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30503 ntohl(cm_id->local_addr.sin_addr.s_addr),
30504 ntohs(cm_id->local_addr.sin_port));
30505
30506 - atomic_inc(&cm_connects);
30507 + atomic_inc_unchecked(&cm_connects);
30508 nesqp->active_conn = 1;
30509
30510 /* cache the cm_id in the qp */
30511 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30512 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30513 return err;
30514 }
30515 - atomic_inc(&cm_listens_created);
30516 + atomic_inc_unchecked(&cm_listens_created);
30517 }
30518
30519 cm_id->add_ref(cm_id);
30520 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30521
30522 if (nesqp->destroyed)
30523 return;
30524 - atomic_inc(&cm_connecteds);
30525 + atomic_inc_unchecked(&cm_connecteds);
30526 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30527 " local port 0x%04X. jiffies = %lu.\n",
30528 nesqp->hwqp.qp_id,
30529 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30530
30531 cm_id->add_ref(cm_id);
30532 ret = cm_id->event_handler(cm_id, &cm_event);
30533 - atomic_inc(&cm_closes);
30534 + atomic_inc_unchecked(&cm_closes);
30535 cm_event.event = IW_CM_EVENT_CLOSE;
30536 cm_event.status = 0;
30537 cm_event.provider_data = cm_id->provider_data;
30538 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30539 return;
30540 cm_id = cm_node->cm_id;
30541
30542 - atomic_inc(&cm_connect_reqs);
30543 + atomic_inc_unchecked(&cm_connect_reqs);
30544 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30545 cm_node, cm_id, jiffies);
30546
30547 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30548 return;
30549 cm_id = cm_node->cm_id;
30550
30551 - atomic_inc(&cm_connect_reqs);
30552 + atomic_inc_unchecked(&cm_connect_reqs);
30553 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30554 cm_node, cm_id, jiffies);
30555
30556 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30557 index b3b2a24..7bfaf1e 100644
30558 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30559 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30560 @@ -40,8 +40,8 @@
30561 #include "nes.h"
30562 #include "nes_mgt.h"
30563
30564 -atomic_t pau_qps_created;
30565 -atomic_t pau_qps_destroyed;
30566 +atomic_unchecked_t pau_qps_created;
30567 +atomic_unchecked_t pau_qps_destroyed;
30568
30569 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30570 {
30571 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30572 {
30573 struct sk_buff *skb;
30574 unsigned long flags;
30575 - atomic_inc(&pau_qps_destroyed);
30576 + atomic_inc_unchecked(&pau_qps_destroyed);
30577
30578 /* Free packets that have not yet been forwarded */
30579 /* Lock is acquired by skb_dequeue when removing the skb */
30580 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30581 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30582 skb_queue_head_init(&nesqp->pau_list);
30583 spin_lock_init(&nesqp->pau_lock);
30584 - atomic_inc(&pau_qps_created);
30585 + atomic_inc_unchecked(&pau_qps_created);
30586 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30587 }
30588
30589 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30590 index c00d2f3..8834298 100644
30591 --- a/drivers/infiniband/hw/nes/nes_nic.c
30592 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30593 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30594 target_stat_values[++index] = mh_detected;
30595 target_stat_values[++index] = mh_pauses_sent;
30596 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30597 - target_stat_values[++index] = atomic_read(&cm_connects);
30598 - target_stat_values[++index] = atomic_read(&cm_accepts);
30599 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30600 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30601 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30602 - target_stat_values[++index] = atomic_read(&cm_rejects);
30603 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30604 - target_stat_values[++index] = atomic_read(&qps_created);
30605 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30606 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30607 - target_stat_values[++index] = atomic_read(&cm_closes);
30608 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30609 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30610 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30611 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30612 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30613 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30614 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30615 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30616 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30617 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30618 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30619 target_stat_values[++index] = cm_packets_sent;
30620 target_stat_values[++index] = cm_packets_bounced;
30621 target_stat_values[++index] = cm_packets_created;
30622 target_stat_values[++index] = cm_packets_received;
30623 target_stat_values[++index] = cm_packets_dropped;
30624 target_stat_values[++index] = cm_packets_retrans;
30625 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30626 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30627 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30628 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30629 target_stat_values[++index] = cm_backlog_drops;
30630 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30631 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30632 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30633 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30634 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30635 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30636 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30637 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30638 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30639 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30640 target_stat_values[++index] = nesadapter->free_4kpbl;
30641 target_stat_values[++index] = nesadapter->free_256pbl;
30642 target_stat_values[++index] = int_mod_timer_init;
30643 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30644 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30645 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30646 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30647 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30648 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30649 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30650 }
30651
30652 /**
30653 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30654 index 5095bc4..41e8fff 100644
30655 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30656 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30657 @@ -46,9 +46,9 @@
30658
30659 #include <rdma/ib_umem.h>
30660
30661 -atomic_t mod_qp_timouts;
30662 -atomic_t qps_created;
30663 -atomic_t sw_qps_destroyed;
30664 +atomic_unchecked_t mod_qp_timouts;
30665 +atomic_unchecked_t qps_created;
30666 +atomic_unchecked_t sw_qps_destroyed;
30667
30668 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30669
30670 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30671 if (init_attr->create_flags)
30672 return ERR_PTR(-EINVAL);
30673
30674 - atomic_inc(&qps_created);
30675 + atomic_inc_unchecked(&qps_created);
30676 switch (init_attr->qp_type) {
30677 case IB_QPT_RC:
30678 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30679 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30680 struct iw_cm_event cm_event;
30681 int ret = 0;
30682
30683 - atomic_inc(&sw_qps_destroyed);
30684 + atomic_inc_unchecked(&sw_qps_destroyed);
30685 nesqp->destroyed = 1;
30686
30687 /* Blow away the connection if it exists. */
30688 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30689 index b881bdc..c2e360c 100644
30690 --- a/drivers/infiniband/hw/qib/qib.h
30691 +++ b/drivers/infiniband/hw/qib/qib.h
30692 @@ -51,6 +51,7 @@
30693 #include <linux/completion.h>
30694 #include <linux/kref.h>
30695 #include <linux/sched.h>
30696 +#include <linux/slab.h>
30697
30698 #include "qib_common.h"
30699 #include "qib_verbs.h"
30700 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30701 index c351aa4..e6967c2 100644
30702 --- a/drivers/input/gameport/gameport.c
30703 +++ b/drivers/input/gameport/gameport.c
30704 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30705 */
30706 static void gameport_init_port(struct gameport *gameport)
30707 {
30708 - static atomic_t gameport_no = ATOMIC_INIT(0);
30709 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30710
30711 __module_get(THIS_MODULE);
30712
30713 mutex_init(&gameport->drv_mutex);
30714 device_initialize(&gameport->dev);
30715 dev_set_name(&gameport->dev, "gameport%lu",
30716 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30717 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30718 gameport->dev.bus = &gameport_bus;
30719 gameport->dev.release = gameport_release_port;
30720 if (gameport->parent)
30721 diff --git a/drivers/input/input.c b/drivers/input/input.c
30722 index da38d97..2aa0b79 100644
30723 --- a/drivers/input/input.c
30724 +++ b/drivers/input/input.c
30725 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30726 */
30727 int input_register_device(struct input_dev *dev)
30728 {
30729 - static atomic_t input_no = ATOMIC_INIT(0);
30730 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30731 struct input_handler *handler;
30732 const char *path;
30733 int error;
30734 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30735 dev->setkeycode = input_default_setkeycode;
30736
30737 dev_set_name(&dev->dev, "input%ld",
30738 - (unsigned long) atomic_inc_return(&input_no) - 1);
30739 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30740
30741 error = device_add(&dev->dev);
30742 if (error)
30743 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30744 index b8d8611..7a4a04b 100644
30745 --- a/drivers/input/joystick/sidewinder.c
30746 +++ b/drivers/input/joystick/sidewinder.c
30747 @@ -30,6 +30,7 @@
30748 #include <linux/kernel.h>
30749 #include <linux/module.h>
30750 #include <linux/slab.h>
30751 +#include <linux/sched.h>
30752 #include <linux/init.h>
30753 #include <linux/input.h>
30754 #include <linux/gameport.h>
30755 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30756 index d728875..844c89b 100644
30757 --- a/drivers/input/joystick/xpad.c
30758 +++ b/drivers/input/joystick/xpad.c
30759 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30760
30761 static int xpad_led_probe(struct usb_xpad *xpad)
30762 {
30763 - static atomic_t led_seq = ATOMIC_INIT(0);
30764 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30765 long led_no;
30766 struct xpad_led *led;
30767 struct led_classdev *led_cdev;
30768 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30769 if (!led)
30770 return -ENOMEM;
30771
30772 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30773 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30774
30775 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30776 led->xpad = xpad;
30777 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30778 index 0110b5a..d3ad144 100644
30779 --- a/drivers/input/mousedev.c
30780 +++ b/drivers/input/mousedev.c
30781 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30782
30783 spin_unlock_irq(&client->packet_lock);
30784
30785 - if (copy_to_user(buffer, data, count))
30786 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30787 return -EFAULT;
30788
30789 return count;
30790 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30791 index ba70058..571d25d 100644
30792 --- a/drivers/input/serio/serio.c
30793 +++ b/drivers/input/serio/serio.c
30794 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30795 */
30796 static void serio_init_port(struct serio *serio)
30797 {
30798 - static atomic_t serio_no = ATOMIC_INIT(0);
30799 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30800
30801 __module_get(THIS_MODULE);
30802
30803 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30804 mutex_init(&serio->drv_mutex);
30805 device_initialize(&serio->dev);
30806 dev_set_name(&serio->dev, "serio%ld",
30807 - (long)atomic_inc_return(&serio_no) - 1);
30808 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30809 serio->dev.bus = &serio_bus;
30810 serio->dev.release = serio_release_port;
30811 serio->dev.groups = serio_device_attr_groups;
30812 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30813 index e44933d..9ba484a 100644
30814 --- a/drivers/isdn/capi/capi.c
30815 +++ b/drivers/isdn/capi/capi.c
30816 @@ -83,8 +83,8 @@ struct capiminor {
30817
30818 struct capi20_appl *ap;
30819 u32 ncci;
30820 - atomic_t datahandle;
30821 - atomic_t msgid;
30822 + atomic_unchecked_t datahandle;
30823 + atomic_unchecked_t msgid;
30824
30825 struct tty_port port;
30826 int ttyinstop;
30827 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30828 capimsg_setu16(s, 2, mp->ap->applid);
30829 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30830 capimsg_setu8 (s, 5, CAPI_RESP);
30831 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30832 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30833 capimsg_setu32(s, 8, mp->ncci);
30834 capimsg_setu16(s, 12, datahandle);
30835 }
30836 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30837 mp->outbytes -= len;
30838 spin_unlock_bh(&mp->outlock);
30839
30840 - datahandle = atomic_inc_return(&mp->datahandle);
30841 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30842 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30843 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30844 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30845 capimsg_setu16(skb->data, 2, mp->ap->applid);
30846 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30847 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30848 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30849 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30850 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30851 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30852 capimsg_setu16(skb->data, 16, len); /* Data length */
30853 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30854 index db621db..825ea1a 100644
30855 --- a/drivers/isdn/gigaset/common.c
30856 +++ b/drivers/isdn/gigaset/common.c
30857 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30858 cs->commands_pending = 0;
30859 cs->cur_at_seq = 0;
30860 cs->gotfwver = -1;
30861 - cs->open_count = 0;
30862 + local_set(&cs->open_count, 0);
30863 cs->dev = NULL;
30864 cs->tty = NULL;
30865 cs->tty_dev = NULL;
30866 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30867 index 212efaf..f187c6b 100644
30868 --- a/drivers/isdn/gigaset/gigaset.h
30869 +++ b/drivers/isdn/gigaset/gigaset.h
30870 @@ -35,6 +35,7 @@
30871 #include <linux/tty_driver.h>
30872 #include <linux/list.h>
30873 #include <linux/atomic.h>
30874 +#include <asm/local.h>
30875
30876 #define GIG_VERSION {0, 5, 0, 0}
30877 #define GIG_COMPAT {0, 4, 0, 0}
30878 @@ -433,7 +434,7 @@ struct cardstate {
30879 spinlock_t cmdlock;
30880 unsigned curlen, cmdbytes;
30881
30882 - unsigned open_count;
30883 + local_t open_count;
30884 struct tty_struct *tty;
30885 struct tasklet_struct if_wake_tasklet;
30886 unsigned control_state;
30887 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30888 index ee0a549..a7c9798 100644
30889 --- a/drivers/isdn/gigaset/interface.c
30890 +++ b/drivers/isdn/gigaset/interface.c
30891 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30892 }
30893 tty->driver_data = cs;
30894
30895 - ++cs->open_count;
30896 -
30897 - if (cs->open_count == 1) {
30898 + if (local_inc_return(&cs->open_count) == 1) {
30899 spin_lock_irqsave(&cs->lock, flags);
30900 cs->tty = tty;
30901 spin_unlock_irqrestore(&cs->lock, flags);
30902 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30903
30904 if (!cs->connected)
30905 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30906 - else if (!cs->open_count)
30907 + else if (!local_read(&cs->open_count))
30908 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30909 else {
30910 - if (!--cs->open_count) {
30911 + if (!local_dec_return(&cs->open_count)) {
30912 spin_lock_irqsave(&cs->lock, flags);
30913 cs->tty = NULL;
30914 spin_unlock_irqrestore(&cs->lock, flags);
30915 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30916 if (!cs->connected) {
30917 gig_dbg(DEBUG_IF, "not connected");
30918 retval = -ENODEV;
30919 - } else if (!cs->open_count)
30920 + } else if (!local_read(&cs->open_count))
30921 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30922 else {
30923 retval = 0;
30924 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30925 retval = -ENODEV;
30926 goto done;
30927 }
30928 - if (!cs->open_count) {
30929 + if (!local_read(&cs->open_count)) {
30930 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30931 retval = -ENODEV;
30932 goto done;
30933 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30934 if (!cs->connected) {
30935 gig_dbg(DEBUG_IF, "not connected");
30936 retval = -ENODEV;
30937 - } else if (!cs->open_count)
30938 + } else if (!local_read(&cs->open_count))
30939 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30940 else if (cs->mstate != MS_LOCKED) {
30941 dev_warn(cs->dev, "can't write to unlocked device\n");
30942 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30943
30944 if (!cs->connected)
30945 gig_dbg(DEBUG_IF, "not connected");
30946 - else if (!cs->open_count)
30947 + else if (!local_read(&cs->open_count))
30948 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30949 else if (cs->mstate != MS_LOCKED)
30950 dev_warn(cs->dev, "can't write to unlocked device\n");
30951 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30952
30953 if (!cs->connected)
30954 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30955 - else if (!cs->open_count)
30956 + else if (!local_read(&cs->open_count))
30957 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30958 else
30959 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30960 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30961
30962 if (!cs->connected)
30963 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30964 - else if (!cs->open_count)
30965 + else if (!local_read(&cs->open_count))
30966 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30967 else
30968 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30969 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30970 goto out;
30971 }
30972
30973 - if (!cs->open_count) {
30974 + if (!local_read(&cs->open_count)) {
30975 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30976 goto out;
30977 }
30978 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30979 index 2a57da59..e7a12ed 100644
30980 --- a/drivers/isdn/hardware/avm/b1.c
30981 +++ b/drivers/isdn/hardware/avm/b1.c
30982 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30983 }
30984 if (left) {
30985 if (t4file->user) {
30986 - if (copy_from_user(buf, dp, left))
30987 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30988 return -EFAULT;
30989 } else {
30990 memcpy(buf, dp, left);
30991 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30992 }
30993 if (left) {
30994 if (config->user) {
30995 - if (copy_from_user(buf, dp, left))
30996 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30997 return -EFAULT;
30998 } else {
30999 memcpy(buf, dp, left);
31000 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
31001 index 85784a7..a19ca98 100644
31002 --- a/drivers/isdn/hardware/eicon/divasync.h
31003 +++ b/drivers/isdn/hardware/eicon/divasync.h
31004 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31005 } diva_didd_add_adapter_t;
31006 typedef struct _diva_didd_remove_adapter {
31007 IDI_CALL p_request;
31008 -} diva_didd_remove_adapter_t;
31009 +} __no_const diva_didd_remove_adapter_t;
31010 typedef struct _diva_didd_read_adapter_array {
31011 void * buffer;
31012 dword length;
31013 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
31014 index a3bd163..8956575 100644
31015 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
31016 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
31017 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31018 typedef struct _diva_os_idi_adapter_interface {
31019 diva_init_card_proc_t cleanup_adapter_proc;
31020 diva_cmd_card_proc_t cmd_proc;
31021 -} diva_os_idi_adapter_interface_t;
31022 +} __no_const diva_os_idi_adapter_interface_t;
31023
31024 typedef struct _diva_os_xdi_adapter {
31025 struct list_head link;
31026 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
31027 index 2339d73..802ab87 100644
31028 --- a/drivers/isdn/i4l/isdn_net.c
31029 +++ b/drivers/isdn/i4l/isdn_net.c
31030 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
31031 {
31032 isdn_net_local *lp = netdev_priv(dev);
31033 unsigned char *p;
31034 - ushort len = 0;
31035 + int len = 0;
31036
31037 switch (lp->p_encap) {
31038 case ISDN_NET_ENCAP_ETHER:
31039 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31040 index 1f355bb..43f1fea 100644
31041 --- a/drivers/isdn/icn/icn.c
31042 +++ b/drivers/isdn/icn/icn.c
31043 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31044 if (count > len)
31045 count = len;
31046 if (user) {
31047 - if (copy_from_user(msg, buf, count))
31048 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31049 return -EFAULT;
31050 } else
31051 memcpy(msg, buf, count);
31052 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31053 index b5fdcb7..5b6c59f 100644
31054 --- a/drivers/lguest/core.c
31055 +++ b/drivers/lguest/core.c
31056 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
31057 * it's worked so far. The end address needs +1 because __get_vm_area
31058 * allocates an extra guard page, so we need space for that.
31059 */
31060 +
31061 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31062 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31063 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31064 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31065 +#else
31066 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31067 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31068 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31069 +#endif
31070 +
31071 if (!switcher_vma) {
31072 err = -ENOMEM;
31073 printk("lguest: could not map switcher pages high\n");
31074 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
31075 * Now the Switcher is mapped at the right address, we can't fail!
31076 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31077 */
31078 - memcpy(switcher_vma->addr, start_switcher_text,
31079 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31080 end_switcher_text - start_switcher_text);
31081
31082 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31083 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31084 index 65af42f..530c87a 100644
31085 --- a/drivers/lguest/x86/core.c
31086 +++ b/drivers/lguest/x86/core.c
31087 @@ -59,7 +59,7 @@ static struct {
31088 /* Offset from where switcher.S was compiled to where we've copied it */
31089 static unsigned long switcher_offset(void)
31090 {
31091 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31092 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31093 }
31094
31095 /* This cpu's struct lguest_pages. */
31096 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31097 * These copies are pretty cheap, so we do them unconditionally: */
31098 /* Save the current Host top-level page directory.
31099 */
31100 +
31101 +#ifdef CONFIG_PAX_PER_CPU_PGD
31102 + pages->state.host_cr3 = read_cr3();
31103 +#else
31104 pages->state.host_cr3 = __pa(current->mm->pgd);
31105 +#endif
31106 +
31107 /*
31108 * Set up the Guest's page tables to see this CPU's pages (and no
31109 * other CPU's pages).
31110 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31111 * compiled-in switcher code and the high-mapped copy we just made.
31112 */
31113 for (i = 0; i < IDT_ENTRIES; i++)
31114 - default_idt_entries[i] += switcher_offset();
31115 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31116
31117 /*
31118 * Set up the Switcher's per-cpu areas.
31119 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31120 * it will be undisturbed when we switch. To change %cs and jump we
31121 * need this structure to feed to Intel's "lcall" instruction.
31122 */
31123 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31124 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31125 lguest_entry.segment = LGUEST_CS;
31126
31127 /*
31128 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31129 index 40634b0..4f5855e 100644
31130 --- a/drivers/lguest/x86/switcher_32.S
31131 +++ b/drivers/lguest/x86/switcher_32.S
31132 @@ -87,6 +87,7 @@
31133 #include <asm/page.h>
31134 #include <asm/segment.h>
31135 #include <asm/lguest.h>
31136 +#include <asm/processor-flags.h>
31137
31138 // We mark the start of the code to copy
31139 // It's placed in .text tho it's never run here
31140 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31141 // Changes type when we load it: damn Intel!
31142 // For after we switch over our page tables
31143 // That entry will be read-only: we'd crash.
31144 +
31145 +#ifdef CONFIG_PAX_KERNEXEC
31146 + mov %cr0, %edx
31147 + xor $X86_CR0_WP, %edx
31148 + mov %edx, %cr0
31149 +#endif
31150 +
31151 movl $(GDT_ENTRY_TSS*8), %edx
31152 ltr %dx
31153
31154 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31155 // Let's clear it again for our return.
31156 // The GDT descriptor of the Host
31157 // Points to the table after two "size" bytes
31158 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31159 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31160 // Clear "used" from type field (byte 5, bit 2)
31161 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31162 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31163 +
31164 +#ifdef CONFIG_PAX_KERNEXEC
31165 + mov %cr0, %eax
31166 + xor $X86_CR0_WP, %eax
31167 + mov %eax, %cr0
31168 +#endif
31169
31170 // Once our page table's switched, the Guest is live!
31171 // The Host fades as we run this final step.
31172 @@ -295,13 +309,12 @@ deliver_to_host:
31173 // I consulted gcc, and it gave
31174 // These instructions, which I gladly credit:
31175 leal (%edx,%ebx,8), %eax
31176 - movzwl (%eax),%edx
31177 - movl 4(%eax), %eax
31178 - xorw %ax, %ax
31179 - orl %eax, %edx
31180 + movl 4(%eax), %edx
31181 + movw (%eax), %dx
31182 // Now the address of the handler's in %edx
31183 // We call it now: its "iret" drops us home.
31184 - jmp *%edx
31185 + ljmp $__KERNEL_CS, $1f
31186 +1: jmp *%edx
31187
31188 // Every interrupt can come to us here
31189 // But we must truly tell each apart.
31190 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31191 index 4daf9e5..b8d1d0f 100644
31192 --- a/drivers/macintosh/macio_asic.c
31193 +++ b/drivers/macintosh/macio_asic.c
31194 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31195 * MacIO is matched against any Apple ID, it's probe() function
31196 * will then decide wether it applies or not
31197 */
31198 -static const struct pci_device_id __devinitdata pci_ids [] = { {
31199 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31200 .vendor = PCI_VENDOR_ID_APPLE,
31201 .device = PCI_ANY_ID,
31202 .subvendor = PCI_ANY_ID,
31203 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31204 index 31c2dc2..a2de7a6 100644
31205 --- a/drivers/md/dm-ioctl.c
31206 +++ b/drivers/md/dm-ioctl.c
31207 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31208 cmd == DM_LIST_VERSIONS_CMD)
31209 return 0;
31210
31211 - if ((cmd == DM_DEV_CREATE_CMD)) {
31212 + if (cmd == DM_DEV_CREATE_CMD) {
31213 if (!*param->name) {
31214 DMWARN("name not supplied when creating device");
31215 return -EINVAL;
31216 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31217 index 9bfd057..01180bc 100644
31218 --- a/drivers/md/dm-raid1.c
31219 +++ b/drivers/md/dm-raid1.c
31220 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31221
31222 struct mirror {
31223 struct mirror_set *ms;
31224 - atomic_t error_count;
31225 + atomic_unchecked_t error_count;
31226 unsigned long error_type;
31227 struct dm_dev *dev;
31228 sector_t offset;
31229 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31230 struct mirror *m;
31231
31232 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31233 - if (!atomic_read(&m->error_count))
31234 + if (!atomic_read_unchecked(&m->error_count))
31235 return m;
31236
31237 return NULL;
31238 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31239 * simple way to tell if a device has encountered
31240 * errors.
31241 */
31242 - atomic_inc(&m->error_count);
31243 + atomic_inc_unchecked(&m->error_count);
31244
31245 if (test_and_set_bit(error_type, &m->error_type))
31246 return;
31247 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31248 struct mirror *m = get_default_mirror(ms);
31249
31250 do {
31251 - if (likely(!atomic_read(&m->error_count)))
31252 + if (likely(!atomic_read_unchecked(&m->error_count)))
31253 return m;
31254
31255 if (m-- == ms->mirror)
31256 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31257 {
31258 struct mirror *default_mirror = get_default_mirror(m->ms);
31259
31260 - return !atomic_read(&default_mirror->error_count);
31261 + return !atomic_read_unchecked(&default_mirror->error_count);
31262 }
31263
31264 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31265 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31266 */
31267 if (likely(region_in_sync(ms, region, 1)))
31268 m = choose_mirror(ms, bio->bi_sector);
31269 - else if (m && atomic_read(&m->error_count))
31270 + else if (m && atomic_read_unchecked(&m->error_count))
31271 m = NULL;
31272
31273 if (likely(m))
31274 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31275 }
31276
31277 ms->mirror[mirror].ms = ms;
31278 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31279 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31280 ms->mirror[mirror].error_type = 0;
31281 ms->mirror[mirror].offset = offset;
31282
31283 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31284 */
31285 static char device_status_char(struct mirror *m)
31286 {
31287 - if (!atomic_read(&(m->error_count)))
31288 + if (!atomic_read_unchecked(&(m->error_count)))
31289 return 'A';
31290
31291 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31292 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31293 index 3d80cf0..b77cc47 100644
31294 --- a/drivers/md/dm-stripe.c
31295 +++ b/drivers/md/dm-stripe.c
31296 @@ -20,7 +20,7 @@ struct stripe {
31297 struct dm_dev *dev;
31298 sector_t physical_start;
31299
31300 - atomic_t error_count;
31301 + atomic_unchecked_t error_count;
31302 };
31303
31304 struct stripe_c {
31305 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31306 kfree(sc);
31307 return r;
31308 }
31309 - atomic_set(&(sc->stripe[i].error_count), 0);
31310 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31311 }
31312
31313 ti->private = sc;
31314 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31315 DMEMIT("%d ", sc->stripes);
31316 for (i = 0; i < sc->stripes; i++) {
31317 DMEMIT("%s ", sc->stripe[i].dev->name);
31318 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31319 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31320 'D' : 'A';
31321 }
31322 buffer[i] = '\0';
31323 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31324 */
31325 for (i = 0; i < sc->stripes; i++)
31326 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31327 - atomic_inc(&(sc->stripe[i].error_count));
31328 - if (atomic_read(&(sc->stripe[i].error_count)) <
31329 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31330 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31331 DM_IO_ERROR_THRESHOLD)
31332 schedule_work(&sc->trigger_event);
31333 }
31334 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31335 index 8e91321..fd17aef 100644
31336 --- a/drivers/md/dm-table.c
31337 +++ b/drivers/md/dm-table.c
31338 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31339 if (!dev_size)
31340 return 0;
31341
31342 - if ((start >= dev_size) || (start + len > dev_size)) {
31343 + if ((start >= dev_size) || (len > dev_size - start)) {
31344 DMWARN("%s: %s too small for target: "
31345 "start=%llu, len=%llu, dev_size=%llu",
31346 dm_device_name(ti->table->md), bdevname(bdev, b),
31347 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31348 index 59c4f04..4c7b661 100644
31349 --- a/drivers/md/dm-thin-metadata.c
31350 +++ b/drivers/md/dm-thin-metadata.c
31351 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31352
31353 pmd->info.tm = tm;
31354 pmd->info.levels = 2;
31355 - pmd->info.value_type.context = pmd->data_sm;
31356 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31357 pmd->info.value_type.size = sizeof(__le64);
31358 pmd->info.value_type.inc = data_block_inc;
31359 pmd->info.value_type.dec = data_block_dec;
31360 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31361
31362 pmd->bl_info.tm = tm;
31363 pmd->bl_info.levels = 1;
31364 - pmd->bl_info.value_type.context = pmd->data_sm;
31365 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31366 pmd->bl_info.value_type.size = sizeof(__le64);
31367 pmd->bl_info.value_type.inc = data_block_inc;
31368 pmd->bl_info.value_type.dec = data_block_dec;
31369 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31370 index 4720f68..78d1df7 100644
31371 --- a/drivers/md/dm.c
31372 +++ b/drivers/md/dm.c
31373 @@ -177,9 +177,9 @@ struct mapped_device {
31374 /*
31375 * Event handling.
31376 */
31377 - atomic_t event_nr;
31378 + atomic_unchecked_t event_nr;
31379 wait_queue_head_t eventq;
31380 - atomic_t uevent_seq;
31381 + atomic_unchecked_t uevent_seq;
31382 struct list_head uevent_list;
31383 spinlock_t uevent_lock; /* Protect access to uevent_list */
31384
31385 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31386 rwlock_init(&md->map_lock);
31387 atomic_set(&md->holders, 1);
31388 atomic_set(&md->open_count, 0);
31389 - atomic_set(&md->event_nr, 0);
31390 - atomic_set(&md->uevent_seq, 0);
31391 + atomic_set_unchecked(&md->event_nr, 0);
31392 + atomic_set_unchecked(&md->uevent_seq, 0);
31393 INIT_LIST_HEAD(&md->uevent_list);
31394 spin_lock_init(&md->uevent_lock);
31395
31396 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31397
31398 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31399
31400 - atomic_inc(&md->event_nr);
31401 + atomic_inc_unchecked(&md->event_nr);
31402 wake_up(&md->eventq);
31403 }
31404
31405 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31406
31407 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31408 {
31409 - return atomic_add_return(1, &md->uevent_seq);
31410 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31411 }
31412
31413 uint32_t dm_get_event_nr(struct mapped_device *md)
31414 {
31415 - return atomic_read(&md->event_nr);
31416 + return atomic_read_unchecked(&md->event_nr);
31417 }
31418
31419 int dm_wait_event(struct mapped_device *md, int event_nr)
31420 {
31421 return wait_event_interruptible(md->eventq,
31422 - (event_nr != atomic_read(&md->event_nr)));
31423 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31424 }
31425
31426 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31427 diff --git a/drivers/md/md.c b/drivers/md/md.c
31428 index f47f1f8..b7f559e 100644
31429 --- a/drivers/md/md.c
31430 +++ b/drivers/md/md.c
31431 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31432 * start build, activate spare
31433 */
31434 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31435 -static atomic_t md_event_count;
31436 +static atomic_unchecked_t md_event_count;
31437 void md_new_event(struct mddev *mddev)
31438 {
31439 - atomic_inc(&md_event_count);
31440 + atomic_inc_unchecked(&md_event_count);
31441 wake_up(&md_event_waiters);
31442 }
31443 EXPORT_SYMBOL_GPL(md_new_event);
31444 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31445 */
31446 static void md_new_event_inintr(struct mddev *mddev)
31447 {
31448 - atomic_inc(&md_event_count);
31449 + atomic_inc_unchecked(&md_event_count);
31450 wake_up(&md_event_waiters);
31451 }
31452
31453 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31454
31455 rdev->preferred_minor = 0xffff;
31456 rdev->data_offset = le64_to_cpu(sb->data_offset);
31457 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31458 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31459
31460 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31461 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31462 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31463 else
31464 sb->resync_offset = cpu_to_le64(0);
31465
31466 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31467 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31468
31469 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31470 sb->size = cpu_to_le64(mddev->dev_sectors);
31471 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31472 static ssize_t
31473 errors_show(struct md_rdev *rdev, char *page)
31474 {
31475 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31476 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31477 }
31478
31479 static ssize_t
31480 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31481 char *e;
31482 unsigned long n = simple_strtoul(buf, &e, 10);
31483 if (*buf && (*e == 0 || *e == '\n')) {
31484 - atomic_set(&rdev->corrected_errors, n);
31485 + atomic_set_unchecked(&rdev->corrected_errors, n);
31486 return len;
31487 }
31488 return -EINVAL;
31489 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31490 rdev->sb_loaded = 0;
31491 rdev->bb_page = NULL;
31492 atomic_set(&rdev->nr_pending, 0);
31493 - atomic_set(&rdev->read_errors, 0);
31494 - atomic_set(&rdev->corrected_errors, 0);
31495 + atomic_set_unchecked(&rdev->read_errors, 0);
31496 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31497
31498 INIT_LIST_HEAD(&rdev->same_set);
31499 init_waitqueue_head(&rdev->blocked_wait);
31500 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31501
31502 spin_unlock(&pers_lock);
31503 seq_printf(seq, "\n");
31504 - seq->poll_event = atomic_read(&md_event_count);
31505 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31506 return 0;
31507 }
31508 if (v == (void*)2) {
31509 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31510 chunk_kb ? "KB" : "B");
31511 if (bitmap->file) {
31512 seq_printf(seq, ", file: ");
31513 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31514 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31515 }
31516
31517 seq_printf(seq, "\n");
31518 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31519 return error;
31520
31521 seq = file->private_data;
31522 - seq->poll_event = atomic_read(&md_event_count);
31523 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31524 return error;
31525 }
31526
31527 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31528 /* always allow read */
31529 mask = POLLIN | POLLRDNORM;
31530
31531 - if (seq->poll_event != atomic_read(&md_event_count))
31532 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31533 mask |= POLLERR | POLLPRI;
31534 return mask;
31535 }
31536 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31537 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31538 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31539 (int)part_stat_read(&disk->part0, sectors[1]) -
31540 - atomic_read(&disk->sync_io);
31541 + atomic_read_unchecked(&disk->sync_io);
31542 /* sync IO will cause sync_io to increase before the disk_stats
31543 * as sync_io is counted when a request starts, and
31544 * disk_stats is counted when it completes.
31545 diff --git a/drivers/md/md.h b/drivers/md/md.h
31546 index cf742d9..7c7c745 100644
31547 --- a/drivers/md/md.h
31548 +++ b/drivers/md/md.h
31549 @@ -120,13 +120,13 @@ struct md_rdev {
31550 * only maintained for arrays that
31551 * support hot removal
31552 */
31553 - atomic_t read_errors; /* number of consecutive read errors that
31554 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31555 * we have tried to ignore.
31556 */
31557 struct timespec last_read_error; /* monotonic time since our
31558 * last read error
31559 */
31560 - atomic_t corrected_errors; /* number of corrected read errors,
31561 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31562 * for reporting to userspace and storing
31563 * in superblock.
31564 */
31565 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31566
31567 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31568 {
31569 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31570 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31571 }
31572
31573 struct md_personality
31574 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31575 index 50ed53b..4f29d7d 100644
31576 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31577 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31578 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31579 /*----------------------------------------------------------------*/
31580
31581 struct sm_checker {
31582 - struct dm_space_map sm;
31583 + dm_space_map_no_const sm;
31584
31585 struct count_array old_counts;
31586 struct count_array counts;
31587 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31588 index fc469ba..2d91555 100644
31589 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31590 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31591 @@ -23,7 +23,7 @@
31592 * Space map interface.
31593 */
31594 struct sm_disk {
31595 - struct dm_space_map sm;
31596 + dm_space_map_no_const sm;
31597
31598 struct ll_disk ll;
31599 struct ll_disk old_ll;
31600 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31601 index e89ae5e..062e4c2 100644
31602 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31603 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31604 @@ -43,7 +43,7 @@ struct block_op {
31605 };
31606
31607 struct sm_metadata {
31608 - struct dm_space_map sm;
31609 + dm_space_map_no_const sm;
31610
31611 struct ll_disk ll;
31612 struct ll_disk old_ll;
31613 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31614 index 1cbfc6b..56e1dbb 100644
31615 --- a/drivers/md/persistent-data/dm-space-map.h
31616 +++ b/drivers/md/persistent-data/dm-space-map.h
31617 @@ -60,6 +60,7 @@ struct dm_space_map {
31618 int (*root_size)(struct dm_space_map *sm, size_t *result);
31619 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31620 };
31621 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31622
31623 /*----------------------------------------------------------------*/
31624
31625 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31626 index 7d9e071..015b1d5 100644
31627 --- a/drivers/md/raid1.c
31628 +++ b/drivers/md/raid1.c
31629 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31630 if (r1_sync_page_io(rdev, sect, s,
31631 bio->bi_io_vec[idx].bv_page,
31632 READ) != 0)
31633 - atomic_add(s, &rdev->corrected_errors);
31634 + atomic_add_unchecked(s, &rdev->corrected_errors);
31635 }
31636 sectors -= s;
31637 sect += s;
31638 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31639 test_bit(In_sync, &rdev->flags)) {
31640 if (r1_sync_page_io(rdev, sect, s,
31641 conf->tmppage, READ)) {
31642 - atomic_add(s, &rdev->corrected_errors);
31643 + atomic_add_unchecked(s, &rdev->corrected_errors);
31644 printk(KERN_INFO
31645 "md/raid1:%s: read error corrected "
31646 "(%d sectors at %llu on %s)\n",
31647 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31648 index 685ddf3..955b087 100644
31649 --- a/drivers/md/raid10.c
31650 +++ b/drivers/md/raid10.c
31651 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31652 /* The write handler will notice the lack of
31653 * R10BIO_Uptodate and record any errors etc
31654 */
31655 - atomic_add(r10_bio->sectors,
31656 + atomic_add_unchecked(r10_bio->sectors,
31657 &conf->mirrors[d].rdev->corrected_errors);
31658
31659 /* for reconstruct, we always reschedule after a read.
31660 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31661 {
31662 struct timespec cur_time_mon;
31663 unsigned long hours_since_last;
31664 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31665 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31666
31667 ktime_get_ts(&cur_time_mon);
31668
31669 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31670 * overflowing the shift of read_errors by hours_since_last.
31671 */
31672 if (hours_since_last >= 8 * sizeof(read_errors))
31673 - atomic_set(&rdev->read_errors, 0);
31674 + atomic_set_unchecked(&rdev->read_errors, 0);
31675 else
31676 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31677 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31678 }
31679
31680 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31681 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31682 return;
31683
31684 check_decay_read_errors(mddev, rdev);
31685 - atomic_inc(&rdev->read_errors);
31686 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31687 + atomic_inc_unchecked(&rdev->read_errors);
31688 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31689 char b[BDEVNAME_SIZE];
31690 bdevname(rdev->bdev, b);
31691
31692 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31693 "md/raid10:%s: %s: Raid device exceeded "
31694 "read_error threshold [cur %d:max %d]\n",
31695 mdname(mddev), b,
31696 - atomic_read(&rdev->read_errors), max_read_errors);
31697 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31698 printk(KERN_NOTICE
31699 "md/raid10:%s: %s: Failing raid device\n",
31700 mdname(mddev), b);
31701 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31702 (unsigned long long)(
31703 sect + rdev->data_offset),
31704 bdevname(rdev->bdev, b));
31705 - atomic_add(s, &rdev->corrected_errors);
31706 + atomic_add_unchecked(s, &rdev->corrected_errors);
31707 }
31708
31709 rdev_dec_pending(rdev, mddev);
31710 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31711 index 858fdbb..b2dac95 100644
31712 --- a/drivers/md/raid5.c
31713 +++ b/drivers/md/raid5.c
31714 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31715 (unsigned long long)(sh->sector
31716 + rdev->data_offset),
31717 bdevname(rdev->bdev, b));
31718 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31719 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31720 clear_bit(R5_ReadError, &sh->dev[i].flags);
31721 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31722 }
31723 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31724 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31725 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31726 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31727 } else {
31728 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31729 int retry = 0;
31730 rdev = conf->disks[i].rdev;
31731
31732 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31733 - atomic_inc(&rdev->read_errors);
31734 + atomic_inc_unchecked(&rdev->read_errors);
31735 if (conf->mddev->degraded >= conf->max_degraded)
31736 printk_ratelimited(
31737 KERN_WARNING
31738 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31739 (unsigned long long)(sh->sector
31740 + rdev->data_offset),
31741 bdn);
31742 - else if (atomic_read(&rdev->read_errors)
31743 + else if (atomic_read_unchecked(&rdev->read_errors)
31744 > conf->max_nr_stripes)
31745 printk(KERN_WARNING
31746 "md/raid:%s: Too many read errors, failing device %s.\n",
31747 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31748 index ba9a643..e474ab5 100644
31749 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31750 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31751 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31752 .subvendor = _subvend, .subdevice = _subdev, \
31753 .driver_data = (unsigned long)&_driverdata }
31754
31755 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31756 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31757 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31758 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31759 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31760 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31761 index a7d876f..8c21b61 100644
31762 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31763 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31764 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31765 union {
31766 dmx_ts_cb ts;
31767 dmx_section_cb sec;
31768 - } cb;
31769 + } __no_const cb;
31770
31771 struct dvb_demux *demux;
31772 void *priv;
31773 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31774 index f732877..d38c35a 100644
31775 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31776 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31777 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31778 const struct dvb_device *template, void *priv, int type)
31779 {
31780 struct dvb_device *dvbdev;
31781 - struct file_operations *dvbdevfops;
31782 + file_operations_no_const *dvbdevfops;
31783 struct device *clsdev;
31784 int minor;
31785 int id;
31786 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31787 index 9f2a02c..5920f88 100644
31788 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31789 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31790 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31791 struct dib0700_adapter_state {
31792 int (*set_param_save) (struct dvb_frontend *,
31793 struct dvb_frontend_parameters *);
31794 -};
31795 +} __no_const;
31796
31797 static int dib7070_set_param_override(struct dvb_frontend *fe,
31798 struct dvb_frontend_parameters *fep)
31799 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31800 index f103ec1..5e8968b 100644
31801 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31802 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31803 @@ -95,7 +95,7 @@ struct su3000_state {
31804
31805 struct s6x0_state {
31806 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31807 -};
31808 +} __no_const;
31809
31810 /* debug */
31811 static int dvb_usb_dw2102_debug;
31812 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31813 index 404f63a..4796533 100644
31814 --- a/drivers/media/dvb/frontends/dib3000.h
31815 +++ b/drivers/media/dvb/frontends/dib3000.h
31816 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31817 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31818 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31819 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31820 -};
31821 +} __no_const;
31822
31823 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31824 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31825 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31826 index 90bf573..e8463da 100644
31827 --- a/drivers/media/dvb/frontends/ds3000.c
31828 +++ b/drivers/media/dvb/frontends/ds3000.c
31829 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31830
31831 for (i = 0; i < 30 ; i++) {
31832 ds3000_read_status(fe, &status);
31833 - if (status && FE_HAS_LOCK)
31834 + if (status & FE_HAS_LOCK)
31835 break;
31836
31837 msleep(10);
31838 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31839 index 0564192..75b16f5 100644
31840 --- a/drivers/media/dvb/ngene/ngene-cards.c
31841 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31842 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31843
31844 /****************************************************************************/
31845
31846 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31847 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31848 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31849 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31850 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31851 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31852 index 16a089f..ab1667d 100644
31853 --- a/drivers/media/radio/radio-cadet.c
31854 +++ b/drivers/media/radio/radio-cadet.c
31855 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31856 unsigned char readbuf[RDS_BUFFER];
31857 int i = 0;
31858
31859 + if (count > RDS_BUFFER)
31860 + return -EFAULT;
31861 mutex_lock(&dev->lock);
31862 if (dev->rdsstat == 0) {
31863 dev->rdsstat = 1;
31864 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31865 index 61287fc..8b08712 100644
31866 --- a/drivers/media/rc/redrat3.c
31867 +++ b/drivers/media/rc/redrat3.c
31868 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31869 return carrier;
31870 }
31871
31872 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31873 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31874 {
31875 struct redrat3_dev *rr3 = rcdev->priv;
31876 struct device *dev = rr3->dev;
31877 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31878 index 9cde353..8c6a1c3 100644
31879 --- a/drivers/media/video/au0828/au0828.h
31880 +++ b/drivers/media/video/au0828/au0828.h
31881 @@ -191,7 +191,7 @@ struct au0828_dev {
31882
31883 /* I2C */
31884 struct i2c_adapter i2c_adap;
31885 - struct i2c_algorithm i2c_algo;
31886 + i2c_algorithm_no_const i2c_algo;
31887 struct i2c_client i2c_client;
31888 u32 i2c_rc;
31889
31890 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31891 index 68d1240..46b32eb 100644
31892 --- a/drivers/media/video/cx88/cx88-alsa.c
31893 +++ b/drivers/media/video/cx88/cx88-alsa.c
31894 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31895 * Only boards with eeprom and byte 1 at eeprom=1 have it
31896 */
31897
31898 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31899 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31900 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31901 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31902 {0, }
31903 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31904 index 305e6aa..0143317 100644
31905 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31906 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31907 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31908
31909 /* I2C stuff */
31910 struct i2c_adapter i2c_adap;
31911 - struct i2c_algorithm i2c_algo;
31912 + i2c_algorithm_no_const i2c_algo;
31913 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31914 int i2c_cx25840_hack_state;
31915 int i2c_linked;
31916 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31917 index a0895bf..b7ebb1b 100644
31918 --- a/drivers/media/video/timblogiw.c
31919 +++ b/drivers/media/video/timblogiw.c
31920 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31921
31922 /* Platform device functions */
31923
31924 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31925 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31926 .vidioc_querycap = timblogiw_querycap,
31927 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31928 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31929 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31930 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31931 };
31932
31933 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31934 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31935 .owner = THIS_MODULE,
31936 .open = timblogiw_open,
31937 .release = timblogiw_close,
31938 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31939 index e9c6a60..daf6a33 100644
31940 --- a/drivers/message/fusion/mptbase.c
31941 +++ b/drivers/message/fusion/mptbase.c
31942 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31943 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31944 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31945
31946 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31947 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31948 +#else
31949 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31950 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31951 +#endif
31952 +
31953 /*
31954 * Rounding UP to nearest 4-kB boundary here...
31955 */
31956 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31957 index 9d95042..b808101 100644
31958 --- a/drivers/message/fusion/mptsas.c
31959 +++ b/drivers/message/fusion/mptsas.c
31960 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31961 return 0;
31962 }
31963
31964 +static inline void
31965 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31966 +{
31967 + if (phy_info->port_details) {
31968 + phy_info->port_details->rphy = rphy;
31969 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31970 + ioc->name, rphy));
31971 + }
31972 +
31973 + if (rphy) {
31974 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31975 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31976 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31977 + ioc->name, rphy, rphy->dev.release));
31978 + }
31979 +}
31980 +
31981 /* no mutex */
31982 static void
31983 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31984 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31985 return NULL;
31986 }
31987
31988 -static inline void
31989 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31990 -{
31991 - if (phy_info->port_details) {
31992 - phy_info->port_details->rphy = rphy;
31993 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31994 - ioc->name, rphy));
31995 - }
31996 -
31997 - if (rphy) {
31998 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31999 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32000 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32001 - ioc->name, rphy, rphy->dev.release));
32002 - }
32003 -}
32004 -
32005 static inline struct sas_port *
32006 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32007 {
32008 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32009 index 0c3ced7..1fe34ec 100644
32010 --- a/drivers/message/fusion/mptscsih.c
32011 +++ b/drivers/message/fusion/mptscsih.c
32012 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32013
32014 h = shost_priv(SChost);
32015
32016 - if (h) {
32017 - if (h->info_kbuf == NULL)
32018 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32019 - return h->info_kbuf;
32020 - h->info_kbuf[0] = '\0';
32021 + if (!h)
32022 + return NULL;
32023
32024 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32025 - h->info_kbuf[size-1] = '\0';
32026 - }
32027 + if (h->info_kbuf == NULL)
32028 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32029 + return h->info_kbuf;
32030 + h->info_kbuf[0] = '\0';
32031 +
32032 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32033 + h->info_kbuf[size-1] = '\0';
32034
32035 return h->info_kbuf;
32036 }
32037 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32038 index 07dbeaf..5533142 100644
32039 --- a/drivers/message/i2o/i2o_proc.c
32040 +++ b/drivers/message/i2o/i2o_proc.c
32041 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32042 "Array Controller Device"
32043 };
32044
32045 -static char *chtostr(u8 * chars, int n)
32046 -{
32047 - char tmp[256];
32048 - tmp[0] = 0;
32049 - return strncat(tmp, (char *)chars, n);
32050 -}
32051 -
32052 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32053 char *group)
32054 {
32055 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32056
32057 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32058 seq_printf(seq, "%-#8x", ddm_table.module_id);
32059 - seq_printf(seq, "%-29s",
32060 - chtostr(ddm_table.module_name_version, 28));
32061 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32062 seq_printf(seq, "%9d ", ddm_table.data_size);
32063 seq_printf(seq, "%8d", ddm_table.code_size);
32064
32065 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32066
32067 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32068 seq_printf(seq, "%-#8x", dst->module_id);
32069 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32070 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32071 + seq_printf(seq, "%-.28s", dst->module_name_version);
32072 + seq_printf(seq, "%-.8s", dst->date);
32073 seq_printf(seq, "%8d ", dst->module_size);
32074 seq_printf(seq, "%8d ", dst->mpb_size);
32075 seq_printf(seq, "0x%04x", dst->module_flags);
32076 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32077 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32078 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32079 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32080 - seq_printf(seq, "Vendor info : %s\n",
32081 - chtostr((u8 *) (work32 + 2), 16));
32082 - seq_printf(seq, "Product info : %s\n",
32083 - chtostr((u8 *) (work32 + 6), 16));
32084 - seq_printf(seq, "Description : %s\n",
32085 - chtostr((u8 *) (work32 + 10), 16));
32086 - seq_printf(seq, "Product rev. : %s\n",
32087 - chtostr((u8 *) (work32 + 14), 8));
32088 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32089 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32090 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32091 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32092
32093 seq_printf(seq, "Serial number : ");
32094 print_serial_number(seq, (u8 *) (work32 + 16),
32095 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32096 }
32097
32098 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32099 - seq_printf(seq, "Module name : %s\n",
32100 - chtostr(result.module_name, 24));
32101 - seq_printf(seq, "Module revision : %s\n",
32102 - chtostr(result.module_rev, 8));
32103 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32104 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32105
32106 seq_printf(seq, "Serial number : ");
32107 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32108 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32109 return 0;
32110 }
32111
32112 - seq_printf(seq, "Device name : %s\n",
32113 - chtostr(result.device_name, 64));
32114 - seq_printf(seq, "Service name : %s\n",
32115 - chtostr(result.service_name, 64));
32116 - seq_printf(seq, "Physical name : %s\n",
32117 - chtostr(result.physical_location, 64));
32118 - seq_printf(seq, "Instance number : %s\n",
32119 - chtostr(result.instance_number, 4));
32120 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32121 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32122 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32123 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32124
32125 return 0;
32126 }
32127 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32128 index a8c08f3..155fe3d 100644
32129 --- a/drivers/message/i2o/iop.c
32130 +++ b/drivers/message/i2o/iop.c
32131 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32132
32133 spin_lock_irqsave(&c->context_list_lock, flags);
32134
32135 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32136 - atomic_inc(&c->context_list_counter);
32137 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32138 + atomic_inc_unchecked(&c->context_list_counter);
32139
32140 - entry->context = atomic_read(&c->context_list_counter);
32141 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32142
32143 list_add(&entry->list, &c->context_list);
32144
32145 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32146
32147 #if BITS_PER_LONG == 64
32148 spin_lock_init(&c->context_list_lock);
32149 - atomic_set(&c->context_list_counter, 0);
32150 + atomic_set_unchecked(&c->context_list_counter, 0);
32151 INIT_LIST_HEAD(&c->context_list);
32152 #endif
32153
32154 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32155 index 7ce65f4..e66e9bc 100644
32156 --- a/drivers/mfd/abx500-core.c
32157 +++ b/drivers/mfd/abx500-core.c
32158 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
32159
32160 struct abx500_device_entry {
32161 struct list_head list;
32162 - struct abx500_ops ops;
32163 + abx500_ops_no_const ops;
32164 struct device *dev;
32165 };
32166
32167 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32168 index 5c2a06a..8fa077c 100644
32169 --- a/drivers/mfd/janz-cmodio.c
32170 +++ b/drivers/mfd/janz-cmodio.c
32171 @@ -13,6 +13,7 @@
32172
32173 #include <linux/kernel.h>
32174 #include <linux/module.h>
32175 +#include <linux/slab.h>
32176 #include <linux/init.h>
32177 #include <linux/pci.h>
32178 #include <linux/interrupt.h>
32179 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32180 index 29d12a7..f900ba4 100644
32181 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32182 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32183 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
32184 * the lid is closed. This leads to interrupts as soon as a little move
32185 * is done.
32186 */
32187 - atomic_inc(&lis3->count);
32188 + atomic_inc_unchecked(&lis3->count);
32189
32190 wake_up_interruptible(&lis3->misc_wait);
32191 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
32192 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32193 if (lis3->pm_dev)
32194 pm_runtime_get_sync(lis3->pm_dev);
32195
32196 - atomic_set(&lis3->count, 0);
32197 + atomic_set_unchecked(&lis3->count, 0);
32198 return 0;
32199 }
32200
32201 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32202 add_wait_queue(&lis3->misc_wait, &wait);
32203 while (true) {
32204 set_current_state(TASK_INTERRUPTIBLE);
32205 - data = atomic_xchg(&lis3->count, 0);
32206 + data = atomic_xchg_unchecked(&lis3->count, 0);
32207 if (data)
32208 break;
32209
32210 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32211 struct lis3lv02d, miscdev);
32212
32213 poll_wait(file, &lis3->misc_wait, wait);
32214 - if (atomic_read(&lis3->count))
32215 + if (atomic_read_unchecked(&lis3->count))
32216 return POLLIN | POLLRDNORM;
32217 return 0;
32218 }
32219 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32220 index 2b1482a..5d33616 100644
32221 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32222 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32223 @@ -266,7 +266,7 @@ struct lis3lv02d {
32224 struct input_polled_dev *idev; /* input device */
32225 struct platform_device *pdev; /* platform device */
32226 struct regulator_bulk_data regulators[2];
32227 - atomic_t count; /* interrupt count after last read */
32228 + atomic_unchecked_t count; /* interrupt count after last read */
32229 union axis_conversion ac; /* hw -> logical axis */
32230 int mapped_btns[3];
32231
32232 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32233 index 2f30bad..c4c13d0 100644
32234 --- a/drivers/misc/sgi-gru/gruhandles.c
32235 +++ b/drivers/misc/sgi-gru/gruhandles.c
32236 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32237 unsigned long nsec;
32238
32239 nsec = CLKS2NSEC(clks);
32240 - atomic_long_inc(&mcs_op_statistics[op].count);
32241 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32242 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32243 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32244 if (mcs_op_statistics[op].max < nsec)
32245 mcs_op_statistics[op].max = nsec;
32246 }
32247 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32248 index 7768b87..f8aac38 100644
32249 --- a/drivers/misc/sgi-gru/gruprocfs.c
32250 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32251 @@ -32,9 +32,9 @@
32252
32253 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32254
32255 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32256 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32257 {
32258 - unsigned long val = atomic_long_read(v);
32259 + unsigned long val = atomic_long_read_unchecked(v);
32260
32261 seq_printf(s, "%16lu %s\n", val, id);
32262 }
32263 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32264
32265 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32266 for (op = 0; op < mcsop_last; op++) {
32267 - count = atomic_long_read(&mcs_op_statistics[op].count);
32268 - total = atomic_long_read(&mcs_op_statistics[op].total);
32269 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32270 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32271 max = mcs_op_statistics[op].max;
32272 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32273 count ? total / count : 0, max);
32274 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32275 index 5c3ce24..4915ccb 100644
32276 --- a/drivers/misc/sgi-gru/grutables.h
32277 +++ b/drivers/misc/sgi-gru/grutables.h
32278 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32279 * GRU statistics.
32280 */
32281 struct gru_stats_s {
32282 - atomic_long_t vdata_alloc;
32283 - atomic_long_t vdata_free;
32284 - atomic_long_t gts_alloc;
32285 - atomic_long_t gts_free;
32286 - atomic_long_t gms_alloc;
32287 - atomic_long_t gms_free;
32288 - atomic_long_t gts_double_allocate;
32289 - atomic_long_t assign_context;
32290 - atomic_long_t assign_context_failed;
32291 - atomic_long_t free_context;
32292 - atomic_long_t load_user_context;
32293 - atomic_long_t load_kernel_context;
32294 - atomic_long_t lock_kernel_context;
32295 - atomic_long_t unlock_kernel_context;
32296 - atomic_long_t steal_user_context;
32297 - atomic_long_t steal_kernel_context;
32298 - atomic_long_t steal_context_failed;
32299 - atomic_long_t nopfn;
32300 - atomic_long_t asid_new;
32301 - atomic_long_t asid_next;
32302 - atomic_long_t asid_wrap;
32303 - atomic_long_t asid_reuse;
32304 - atomic_long_t intr;
32305 - atomic_long_t intr_cbr;
32306 - atomic_long_t intr_tfh;
32307 - atomic_long_t intr_spurious;
32308 - atomic_long_t intr_mm_lock_failed;
32309 - atomic_long_t call_os;
32310 - atomic_long_t call_os_wait_queue;
32311 - atomic_long_t user_flush_tlb;
32312 - atomic_long_t user_unload_context;
32313 - atomic_long_t user_exception;
32314 - atomic_long_t set_context_option;
32315 - atomic_long_t check_context_retarget_intr;
32316 - atomic_long_t check_context_unload;
32317 - atomic_long_t tlb_dropin;
32318 - atomic_long_t tlb_preload_page;
32319 - atomic_long_t tlb_dropin_fail_no_asid;
32320 - atomic_long_t tlb_dropin_fail_upm;
32321 - atomic_long_t tlb_dropin_fail_invalid;
32322 - atomic_long_t tlb_dropin_fail_range_active;
32323 - atomic_long_t tlb_dropin_fail_idle;
32324 - atomic_long_t tlb_dropin_fail_fmm;
32325 - atomic_long_t tlb_dropin_fail_no_exception;
32326 - atomic_long_t tfh_stale_on_fault;
32327 - atomic_long_t mmu_invalidate_range;
32328 - atomic_long_t mmu_invalidate_page;
32329 - atomic_long_t flush_tlb;
32330 - atomic_long_t flush_tlb_gru;
32331 - atomic_long_t flush_tlb_gru_tgh;
32332 - atomic_long_t flush_tlb_gru_zero_asid;
32333 + atomic_long_unchecked_t vdata_alloc;
32334 + atomic_long_unchecked_t vdata_free;
32335 + atomic_long_unchecked_t gts_alloc;
32336 + atomic_long_unchecked_t gts_free;
32337 + atomic_long_unchecked_t gms_alloc;
32338 + atomic_long_unchecked_t gms_free;
32339 + atomic_long_unchecked_t gts_double_allocate;
32340 + atomic_long_unchecked_t assign_context;
32341 + atomic_long_unchecked_t assign_context_failed;
32342 + atomic_long_unchecked_t free_context;
32343 + atomic_long_unchecked_t load_user_context;
32344 + atomic_long_unchecked_t load_kernel_context;
32345 + atomic_long_unchecked_t lock_kernel_context;
32346 + atomic_long_unchecked_t unlock_kernel_context;
32347 + atomic_long_unchecked_t steal_user_context;
32348 + atomic_long_unchecked_t steal_kernel_context;
32349 + atomic_long_unchecked_t steal_context_failed;
32350 + atomic_long_unchecked_t nopfn;
32351 + atomic_long_unchecked_t asid_new;
32352 + atomic_long_unchecked_t asid_next;
32353 + atomic_long_unchecked_t asid_wrap;
32354 + atomic_long_unchecked_t asid_reuse;
32355 + atomic_long_unchecked_t intr;
32356 + atomic_long_unchecked_t intr_cbr;
32357 + atomic_long_unchecked_t intr_tfh;
32358 + atomic_long_unchecked_t intr_spurious;
32359 + atomic_long_unchecked_t intr_mm_lock_failed;
32360 + atomic_long_unchecked_t call_os;
32361 + atomic_long_unchecked_t call_os_wait_queue;
32362 + atomic_long_unchecked_t user_flush_tlb;
32363 + atomic_long_unchecked_t user_unload_context;
32364 + atomic_long_unchecked_t user_exception;
32365 + atomic_long_unchecked_t set_context_option;
32366 + atomic_long_unchecked_t check_context_retarget_intr;
32367 + atomic_long_unchecked_t check_context_unload;
32368 + atomic_long_unchecked_t tlb_dropin;
32369 + atomic_long_unchecked_t tlb_preload_page;
32370 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32371 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32372 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32373 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32374 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32375 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32376 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32377 + atomic_long_unchecked_t tfh_stale_on_fault;
32378 + atomic_long_unchecked_t mmu_invalidate_range;
32379 + atomic_long_unchecked_t mmu_invalidate_page;
32380 + atomic_long_unchecked_t flush_tlb;
32381 + atomic_long_unchecked_t flush_tlb_gru;
32382 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32383 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32384
32385 - atomic_long_t copy_gpa;
32386 - atomic_long_t read_gpa;
32387 + atomic_long_unchecked_t copy_gpa;
32388 + atomic_long_unchecked_t read_gpa;
32389
32390 - atomic_long_t mesq_receive;
32391 - atomic_long_t mesq_receive_none;
32392 - atomic_long_t mesq_send;
32393 - atomic_long_t mesq_send_failed;
32394 - atomic_long_t mesq_noop;
32395 - atomic_long_t mesq_send_unexpected_error;
32396 - atomic_long_t mesq_send_lb_overflow;
32397 - atomic_long_t mesq_send_qlimit_reached;
32398 - atomic_long_t mesq_send_amo_nacked;
32399 - atomic_long_t mesq_send_put_nacked;
32400 - atomic_long_t mesq_page_overflow;
32401 - atomic_long_t mesq_qf_locked;
32402 - atomic_long_t mesq_qf_noop_not_full;
32403 - atomic_long_t mesq_qf_switch_head_failed;
32404 - atomic_long_t mesq_qf_unexpected_error;
32405 - atomic_long_t mesq_noop_unexpected_error;
32406 - atomic_long_t mesq_noop_lb_overflow;
32407 - atomic_long_t mesq_noop_qlimit_reached;
32408 - atomic_long_t mesq_noop_amo_nacked;
32409 - atomic_long_t mesq_noop_put_nacked;
32410 - atomic_long_t mesq_noop_page_overflow;
32411 + atomic_long_unchecked_t mesq_receive;
32412 + atomic_long_unchecked_t mesq_receive_none;
32413 + atomic_long_unchecked_t mesq_send;
32414 + atomic_long_unchecked_t mesq_send_failed;
32415 + atomic_long_unchecked_t mesq_noop;
32416 + atomic_long_unchecked_t mesq_send_unexpected_error;
32417 + atomic_long_unchecked_t mesq_send_lb_overflow;
32418 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32419 + atomic_long_unchecked_t mesq_send_amo_nacked;
32420 + atomic_long_unchecked_t mesq_send_put_nacked;
32421 + atomic_long_unchecked_t mesq_page_overflow;
32422 + atomic_long_unchecked_t mesq_qf_locked;
32423 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32424 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32425 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32426 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32427 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32428 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32429 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32430 + atomic_long_unchecked_t mesq_noop_put_nacked;
32431 + atomic_long_unchecked_t mesq_noop_page_overflow;
32432
32433 };
32434
32435 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32436 tghop_invalidate, mcsop_last};
32437
32438 struct mcs_op_statistic {
32439 - atomic_long_t count;
32440 - atomic_long_t total;
32441 + atomic_long_unchecked_t count;
32442 + atomic_long_unchecked_t total;
32443 unsigned long max;
32444 };
32445
32446 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32447
32448 #define STAT(id) do { \
32449 if (gru_options & OPT_STATS) \
32450 - atomic_long_inc(&gru_stats.id); \
32451 + atomic_long_inc_unchecked(&gru_stats.id); \
32452 } while (0)
32453
32454 #ifdef CONFIG_SGI_GRU_DEBUG
32455 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32456 index 851b2f2..a4ec097 100644
32457 --- a/drivers/misc/sgi-xp/xp.h
32458 +++ b/drivers/misc/sgi-xp/xp.h
32459 @@ -289,7 +289,7 @@ struct xpc_interface {
32460 xpc_notify_func, void *);
32461 void (*received) (short, int, void *);
32462 enum xp_retval (*partid_to_nasids) (short, void *);
32463 -};
32464 +} __no_const;
32465
32466 extern struct xpc_interface xpc_interface;
32467
32468 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32469 index b94d5f7..7f494c5 100644
32470 --- a/drivers/misc/sgi-xp/xpc.h
32471 +++ b/drivers/misc/sgi-xp/xpc.h
32472 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32473 void (*received_payload) (struct xpc_channel *, void *);
32474 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32475 };
32476 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32477
32478 /* struct xpc_partition act_state values (for XPC HB) */
32479
32480 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32481 /* found in xpc_main.c */
32482 extern struct device *xpc_part;
32483 extern struct device *xpc_chan;
32484 -extern struct xpc_arch_operations xpc_arch_ops;
32485 +extern xpc_arch_operations_no_const xpc_arch_ops;
32486 extern int xpc_disengage_timelimit;
32487 extern int xpc_disengage_timedout;
32488 extern int xpc_activate_IRQ_rcvd;
32489 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32490 index 8d082b4..aa749ae 100644
32491 --- a/drivers/misc/sgi-xp/xpc_main.c
32492 +++ b/drivers/misc/sgi-xp/xpc_main.c
32493 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32494 .notifier_call = xpc_system_die,
32495 };
32496
32497 -struct xpc_arch_operations xpc_arch_ops;
32498 +xpc_arch_operations_no_const xpc_arch_ops;
32499
32500 /*
32501 * Timer function to enforce the timelimit on the partition disengage.
32502 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32503 index 6878a94..fe5c5f1 100644
32504 --- a/drivers/mmc/host/sdhci-pci.c
32505 +++ b/drivers/mmc/host/sdhci-pci.c
32506 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32507 .probe = via_probe,
32508 };
32509
32510 -static const struct pci_device_id pci_ids[] __devinitdata = {
32511 +static const struct pci_device_id pci_ids[] __devinitconst = {
32512 {
32513 .vendor = PCI_VENDOR_ID_RICOH,
32514 .device = PCI_DEVICE_ID_RICOH_R5C822,
32515 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32516 index e9fad91..0a7a16a 100644
32517 --- a/drivers/mtd/devices/doc2000.c
32518 +++ b/drivers/mtd/devices/doc2000.c
32519 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32520
32521 /* The ECC will not be calculated correctly if less than 512 is written */
32522 /* DBB-
32523 - if (len != 0x200 && eccbuf)
32524 + if (len != 0x200)
32525 printk(KERN_WARNING
32526 "ECC needs a full sector write (adr: %lx size %lx)\n",
32527 (long) to, (long) len);
32528 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32529 index a3f7a27..234016e 100644
32530 --- a/drivers/mtd/devices/doc2001.c
32531 +++ b/drivers/mtd/devices/doc2001.c
32532 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32533 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32534
32535 /* Don't allow read past end of device */
32536 - if (from >= this->totlen)
32537 + if (from >= this->totlen || !len)
32538 return -EINVAL;
32539
32540 /* Don't allow a single read to cross a 512-byte block boundary */
32541 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32542 index 3984d48..28aa897 100644
32543 --- a/drivers/mtd/nand/denali.c
32544 +++ b/drivers/mtd/nand/denali.c
32545 @@ -26,6 +26,7 @@
32546 #include <linux/pci.h>
32547 #include <linux/mtd/mtd.h>
32548 #include <linux/module.h>
32549 +#include <linux/slab.h>
32550
32551 #include "denali.h"
32552
32553 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32554 index ac40925..483b753 100644
32555 --- a/drivers/mtd/nftlmount.c
32556 +++ b/drivers/mtd/nftlmount.c
32557 @@ -24,6 +24,7 @@
32558 #include <asm/errno.h>
32559 #include <linux/delay.h>
32560 #include <linux/slab.h>
32561 +#include <linux/sched.h>
32562 #include <linux/mtd/mtd.h>
32563 #include <linux/mtd/nand.h>
32564 #include <linux/mtd/nftl.h>
32565 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32566 index 6c3fb5a..c542a81 100644
32567 --- a/drivers/mtd/ubi/build.c
32568 +++ b/drivers/mtd/ubi/build.c
32569 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32570 static int __init bytes_str_to_int(const char *str)
32571 {
32572 char *endp;
32573 - unsigned long result;
32574 + unsigned long result, scale = 1;
32575
32576 result = simple_strtoul(str, &endp, 0);
32577 if (str == endp || result >= INT_MAX) {
32578 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32579
32580 switch (*endp) {
32581 case 'G':
32582 - result *= 1024;
32583 + scale *= 1024;
32584 case 'M':
32585 - result *= 1024;
32586 + scale *= 1024;
32587 case 'K':
32588 - result *= 1024;
32589 + scale *= 1024;
32590 if (endp[1] == 'i' && endp[2] == 'B')
32591 endp += 2;
32592 case '\0':
32593 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32594 return -EINVAL;
32595 }
32596
32597 - return result;
32598 + if ((intoverflow_t)result*scale >= INT_MAX) {
32599 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32600 + str);
32601 + return -EINVAL;
32602 + }
32603 +
32604 + return result*scale;
32605 }
32606
32607 /**
32608 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32609 index 1feae59..c2a61d2 100644
32610 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32611 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32612 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32613 */
32614
32615 #define ATL2_PARAM(X, desc) \
32616 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32617 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32618 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32619 MODULE_PARM_DESC(X, desc);
32620 #else
32621 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32622 index 9a517c2..a50cfcb 100644
32623 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32624 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32625 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32626
32627 int (*wait_comp)(struct bnx2x *bp,
32628 struct bnx2x_rx_mode_ramrod_params *p);
32629 -};
32630 +} __no_const;
32631
32632 /********************** Set multicast group ***********************************/
32633
32634 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32635 index 94b4bd0..73c02de 100644
32636 --- a/drivers/net/ethernet/broadcom/tg3.h
32637 +++ b/drivers/net/ethernet/broadcom/tg3.h
32638 @@ -134,6 +134,7 @@
32639 #define CHIPREV_ID_5750_A0 0x4000
32640 #define CHIPREV_ID_5750_A1 0x4001
32641 #define CHIPREV_ID_5750_A3 0x4003
32642 +#define CHIPREV_ID_5750_C1 0x4201
32643 #define CHIPREV_ID_5750_C2 0x4202
32644 #define CHIPREV_ID_5752_A0_HW 0x5000
32645 #define CHIPREV_ID_5752_A0 0x6000
32646 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32647 index c5f5479..2e8c260 100644
32648 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32649 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32650 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32651 */
32652 struct l2t_skb_cb {
32653 arp_failure_handler_func arp_failure_handler;
32654 -};
32655 +} __no_const;
32656
32657 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32658
32659 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32660 index 871bcaa..4043505 100644
32661 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32662 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32663 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32664 for (i=0; i<ETH_ALEN; i++) {
32665 tmp.addr[i] = dev->dev_addr[i];
32666 }
32667 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32668 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32669 break;
32670
32671 case DE4X5_SET_HWADDR: /* Set the hardware address */
32672 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32673 spin_lock_irqsave(&lp->lock, flags);
32674 memcpy(&statbuf, &lp->pktStats, ioc->len);
32675 spin_unlock_irqrestore(&lp->lock, flags);
32676 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32677 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32678 return -EFAULT;
32679 break;
32680 }
32681 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32682 index 14d5b61..1398636 100644
32683 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32684 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32685 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32686 {NULL}};
32687
32688
32689 -static const char *block_name[] __devinitdata = {
32690 +static const char *block_name[] __devinitconst = {
32691 "21140 non-MII",
32692 "21140 MII PHY",
32693 "21142 Serial PHY",
32694 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32695 index 4d01219..b58d26d 100644
32696 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32697 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32698 @@ -236,7 +236,7 @@ struct pci_id_info {
32699 int drv_flags; /* Driver use, intended as capability flags. */
32700 };
32701
32702 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32703 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32704 { /* Sometime a Level-One switch card. */
32705 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32706 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32707 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32708 index dcd7f7a..ecb7fb3 100644
32709 --- a/drivers/net/ethernet/dlink/sundance.c
32710 +++ b/drivers/net/ethernet/dlink/sundance.c
32711 @@ -218,7 +218,7 @@ enum {
32712 struct pci_id_info {
32713 const char *name;
32714 };
32715 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32716 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32717 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32718 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32719 {"D-Link DFE-580TX 4 port Server Adapter"},
32720 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32721 index bf266a0..e024af7 100644
32722 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32723 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32724 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32725
32726 if (wrapped)
32727 newacc += 65536;
32728 - ACCESS_ONCE(*acc) = newacc;
32729 + ACCESS_ONCE_RW(*acc) = newacc;
32730 }
32731
32732 void be_parse_stats(struct be_adapter *adapter)
32733 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32734 index 61d2bdd..7f1154a 100644
32735 --- a/drivers/net/ethernet/fealnx.c
32736 +++ b/drivers/net/ethernet/fealnx.c
32737 @@ -150,7 +150,7 @@ struct chip_info {
32738 int flags;
32739 };
32740
32741 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32742 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32743 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32744 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32745 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32746 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32747 index e1159e5..e18684d 100644
32748 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32749 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32750 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32751 {
32752 struct e1000_hw *hw = &adapter->hw;
32753 struct e1000_mac_info *mac = &hw->mac;
32754 - struct e1000_mac_operations *func = &mac->ops;
32755 + e1000_mac_operations_no_const *func = &mac->ops;
32756
32757 /* Set media type */
32758 switch (adapter->pdev->device) {
32759 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32760 index a3e65fd..f451444 100644
32761 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32762 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32763 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32764 {
32765 struct e1000_hw *hw = &adapter->hw;
32766 struct e1000_mac_info *mac = &hw->mac;
32767 - struct e1000_mac_operations *func = &mac->ops;
32768 + e1000_mac_operations_no_const *func = &mac->ops;
32769 u32 swsm = 0;
32770 u32 swsm2 = 0;
32771 bool force_clear_smbi = false;
32772 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32773 index 2967039..ca8c40c 100644
32774 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32775 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32776 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32777 void (*write_vfta)(struct e1000_hw *, u32, u32);
32778 s32 (*read_mac_addr)(struct e1000_hw *);
32779 };
32780 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32781
32782 /*
32783 * When to use various PHY register access functions:
32784 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32785 void (*power_up)(struct e1000_hw *);
32786 void (*power_down)(struct e1000_hw *);
32787 };
32788 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32789
32790 /* Function pointers for the NVM. */
32791 struct e1000_nvm_operations {
32792 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32793 s32 (*validate)(struct e1000_hw *);
32794 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32795 };
32796 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32797
32798 struct e1000_mac_info {
32799 - struct e1000_mac_operations ops;
32800 + e1000_mac_operations_no_const ops;
32801 u8 addr[ETH_ALEN];
32802 u8 perm_addr[ETH_ALEN];
32803
32804 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32805 };
32806
32807 struct e1000_phy_info {
32808 - struct e1000_phy_operations ops;
32809 + e1000_phy_operations_no_const ops;
32810
32811 enum e1000_phy_type type;
32812
32813 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32814 };
32815
32816 struct e1000_nvm_info {
32817 - struct e1000_nvm_operations ops;
32818 + e1000_nvm_operations_no_const ops;
32819
32820 enum e1000_nvm_type type;
32821 enum e1000_nvm_override override;
32822 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32823 index 4519a13..f97fcd0 100644
32824 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32825 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32826 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32827 s32 (*read_mac_addr)(struct e1000_hw *);
32828 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32829 };
32830 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32831
32832 struct e1000_phy_operations {
32833 s32 (*acquire)(struct e1000_hw *);
32834 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32835 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32836 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32837 };
32838 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32839
32840 struct e1000_nvm_operations {
32841 s32 (*acquire)(struct e1000_hw *);
32842 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32843 s32 (*update)(struct e1000_hw *);
32844 s32 (*validate)(struct e1000_hw *);
32845 };
32846 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32847
32848 struct e1000_info {
32849 s32 (*get_invariants)(struct e1000_hw *);
32850 @@ -350,7 +353,7 @@ struct e1000_info {
32851 extern const struct e1000_info e1000_82575_info;
32852
32853 struct e1000_mac_info {
32854 - struct e1000_mac_operations ops;
32855 + e1000_mac_operations_no_const ops;
32856
32857 u8 addr[6];
32858 u8 perm_addr[6];
32859 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32860 };
32861
32862 struct e1000_phy_info {
32863 - struct e1000_phy_operations ops;
32864 + e1000_phy_operations_no_const ops;
32865
32866 enum e1000_phy_type type;
32867
32868 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32869 };
32870
32871 struct e1000_nvm_info {
32872 - struct e1000_nvm_operations ops;
32873 + e1000_nvm_operations_no_const ops;
32874 enum e1000_nvm_type type;
32875 enum e1000_nvm_override override;
32876
32877 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32878 s32 (*check_for_ack)(struct e1000_hw *, u16);
32879 s32 (*check_for_rst)(struct e1000_hw *, u16);
32880 };
32881 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32882
32883 struct e1000_mbx_stats {
32884 u32 msgs_tx;
32885 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32886 };
32887
32888 struct e1000_mbx_info {
32889 - struct e1000_mbx_operations ops;
32890 + e1000_mbx_operations_no_const ops;
32891 struct e1000_mbx_stats stats;
32892 u32 timeout;
32893 u32 usec_delay;
32894 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32895 index d7ed58f..64cde36 100644
32896 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32897 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32898 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32899 s32 (*read_mac_addr)(struct e1000_hw *);
32900 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32901 };
32902 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32903
32904 struct e1000_mac_info {
32905 - struct e1000_mac_operations ops;
32906 + e1000_mac_operations_no_const ops;
32907 u8 addr[6];
32908 u8 perm_addr[6];
32909
32910 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32911 s32 (*check_for_ack)(struct e1000_hw *);
32912 s32 (*check_for_rst)(struct e1000_hw *);
32913 };
32914 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32915
32916 struct e1000_mbx_stats {
32917 u32 msgs_tx;
32918 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32919 };
32920
32921 struct e1000_mbx_info {
32922 - struct e1000_mbx_operations ops;
32923 + e1000_mbx_operations_no_const ops;
32924 struct e1000_mbx_stats stats;
32925 u32 timeout;
32926 u32 usec_delay;
32927 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32928 index 6c5cca8..de8ef63 100644
32929 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32930 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32931 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32932 s32 (*update_checksum)(struct ixgbe_hw *);
32933 u16 (*calc_checksum)(struct ixgbe_hw *);
32934 };
32935 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32936
32937 struct ixgbe_mac_operations {
32938 s32 (*init_hw)(struct ixgbe_hw *);
32939 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32940 /* Manageability interface */
32941 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32942 };
32943 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32944
32945 struct ixgbe_phy_operations {
32946 s32 (*identify)(struct ixgbe_hw *);
32947 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32948 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32949 s32 (*check_overtemp)(struct ixgbe_hw *);
32950 };
32951 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32952
32953 struct ixgbe_eeprom_info {
32954 - struct ixgbe_eeprom_operations ops;
32955 + ixgbe_eeprom_operations_no_const ops;
32956 enum ixgbe_eeprom_type type;
32957 u32 semaphore_delay;
32958 u16 word_size;
32959 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32960
32961 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32962 struct ixgbe_mac_info {
32963 - struct ixgbe_mac_operations ops;
32964 + ixgbe_mac_operations_no_const ops;
32965 enum ixgbe_mac_type type;
32966 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32967 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32968 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32969 };
32970
32971 struct ixgbe_phy_info {
32972 - struct ixgbe_phy_operations ops;
32973 + ixgbe_phy_operations_no_const ops;
32974 struct mdio_if_info mdio;
32975 enum ixgbe_phy_type type;
32976 u32 id;
32977 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32978 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32979 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32980 };
32981 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32982
32983 struct ixgbe_mbx_stats {
32984 u32 msgs_tx;
32985 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32986 };
32987
32988 struct ixgbe_mbx_info {
32989 - struct ixgbe_mbx_operations ops;
32990 + ixgbe_mbx_operations_no_const ops;
32991 struct ixgbe_mbx_stats stats;
32992 u32 timeout;
32993 u32 usec_delay;
32994 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32995 index 10306b4..28df758 100644
32996 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32997 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32998 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32999 s32 (*clear_vfta)(struct ixgbe_hw *);
33000 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
33001 };
33002 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33003
33004 enum ixgbe_mac_type {
33005 ixgbe_mac_unknown = 0,
33006 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33007 };
33008
33009 struct ixgbe_mac_info {
33010 - struct ixgbe_mac_operations ops;
33011 + ixgbe_mac_operations_no_const ops;
33012 u8 addr[6];
33013 u8 perm_addr[6];
33014
33015 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33016 s32 (*check_for_ack)(struct ixgbe_hw *);
33017 s32 (*check_for_rst)(struct ixgbe_hw *);
33018 };
33019 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33020
33021 struct ixgbe_mbx_stats {
33022 u32 msgs_tx;
33023 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33024 };
33025
33026 struct ixgbe_mbx_info {
33027 - struct ixgbe_mbx_operations ops;
33028 + ixgbe_mbx_operations_no_const ops;
33029 struct ixgbe_mbx_stats stats;
33030 u32 timeout;
33031 u32 udelay;
33032 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
33033 index 94bbc85..78c12e6 100644
33034 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
33035 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
33036 @@ -40,6 +40,7 @@
33037 #include <linux/dma-mapping.h>
33038 #include <linux/slab.h>
33039 #include <linux/io-mapping.h>
33040 +#include <linux/sched.h>
33041
33042 #include <linux/mlx4/device.h>
33043 #include <linux/mlx4/doorbell.h>
33044 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33045 index 5046a64..71ca936 100644
33046 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
33047 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33048 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
33049 void (*link_down)(struct __vxge_hw_device *devh);
33050 void (*crit_err)(struct __vxge_hw_device *devh,
33051 enum vxge_hw_event type, u64 ext_data);
33052 -};
33053 +} __no_const;
33054
33055 /*
33056 * struct __vxge_hw_blockpool_entry - Block private data structure
33057 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33058 index 4a518a3..936b334 100644
33059 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33060 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33061 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
33062 struct vxge_hw_mempool_dma *dma_object,
33063 u32 index,
33064 u32 is_last);
33065 -};
33066 +} __no_const;
33067
33068 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
33069 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
33070 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
33071 index c8f47f1..5da9840 100644
33072 --- a/drivers/net/ethernet/realtek/r8169.c
33073 +++ b/drivers/net/ethernet/realtek/r8169.c
33074 @@ -698,17 +698,17 @@ struct rtl8169_private {
33075 struct mdio_ops {
33076 void (*write)(void __iomem *, int, int);
33077 int (*read)(void __iomem *, int);
33078 - } mdio_ops;
33079 + } __no_const mdio_ops;
33080
33081 struct pll_power_ops {
33082 void (*down)(struct rtl8169_private *);
33083 void (*up)(struct rtl8169_private *);
33084 - } pll_power_ops;
33085 + } __no_const pll_power_ops;
33086
33087 struct jumbo_ops {
33088 void (*enable)(struct rtl8169_private *);
33089 void (*disable)(struct rtl8169_private *);
33090 - } jumbo_ops;
33091 + } __no_const jumbo_ops;
33092
33093 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33094 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33095 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
33096 index 1b4658c..a30dabb 100644
33097 --- a/drivers/net/ethernet/sis/sis190.c
33098 +++ b/drivers/net/ethernet/sis/sis190.c
33099 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
33100 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33101 struct net_device *dev)
33102 {
33103 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33104 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33105 struct sis190_private *tp = netdev_priv(dev);
33106 struct pci_dev *isa_bridge;
33107 u8 reg, tmp8;
33108 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
33109 index edfa15d..002bfa9 100644
33110 --- a/drivers/net/ppp/ppp_generic.c
33111 +++ b/drivers/net/ppp/ppp_generic.c
33112 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33113 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33114 struct ppp_stats stats;
33115 struct ppp_comp_stats cstats;
33116 - char *vers;
33117
33118 switch (cmd) {
33119 case SIOCGPPPSTATS:
33120 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33121 break;
33122
33123 case SIOCGPPPVER:
33124 - vers = PPP_VERSION;
33125 - if (copy_to_user(addr, vers, strlen(vers) + 1))
33126 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33127 break;
33128 err = 0;
33129 break;
33130 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
33131 index 515f122..41dd273 100644
33132 --- a/drivers/net/tokenring/abyss.c
33133 +++ b/drivers/net/tokenring/abyss.c
33134 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
33135
33136 static int __init abyss_init (void)
33137 {
33138 - abyss_netdev_ops = tms380tr_netdev_ops;
33139 + pax_open_kernel();
33140 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33141
33142 - abyss_netdev_ops.ndo_open = abyss_open;
33143 - abyss_netdev_ops.ndo_stop = abyss_close;
33144 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33145 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33146 + pax_close_kernel();
33147
33148 return pci_register_driver(&abyss_driver);
33149 }
33150 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
33151 index 6153cfd..cf69c1c 100644
33152 --- a/drivers/net/tokenring/madgemc.c
33153 +++ b/drivers/net/tokenring/madgemc.c
33154 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
33155
33156 static int __init madgemc_init (void)
33157 {
33158 - madgemc_netdev_ops = tms380tr_netdev_ops;
33159 - madgemc_netdev_ops.ndo_open = madgemc_open;
33160 - madgemc_netdev_ops.ndo_stop = madgemc_close;
33161 + pax_open_kernel();
33162 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33163 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
33164 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
33165 + pax_close_kernel();
33166
33167 return mca_register_driver (&madgemc_driver);
33168 }
33169 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
33170 index 8d362e6..f91cc52 100644
33171 --- a/drivers/net/tokenring/proteon.c
33172 +++ b/drivers/net/tokenring/proteon.c
33173 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
33174 struct platform_device *pdev;
33175 int i, num = 0, err = 0;
33176
33177 - proteon_netdev_ops = tms380tr_netdev_ops;
33178 - proteon_netdev_ops.ndo_open = proteon_open;
33179 - proteon_netdev_ops.ndo_stop = tms380tr_close;
33180 + pax_open_kernel();
33181 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33182 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
33183 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
33184 + pax_close_kernel();
33185
33186 err = platform_driver_register(&proteon_driver);
33187 if (err)
33188 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
33189 index 46db5c5..37c1536 100644
33190 --- a/drivers/net/tokenring/skisa.c
33191 +++ b/drivers/net/tokenring/skisa.c
33192 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
33193 struct platform_device *pdev;
33194 int i, num = 0, err = 0;
33195
33196 - sk_isa_netdev_ops = tms380tr_netdev_ops;
33197 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
33198 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33199 + pax_open_kernel();
33200 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33201 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33202 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33203 + pax_close_kernel();
33204
33205 err = platform_driver_register(&sk_isa_driver);
33206 if (err)
33207 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33208 index 304fe78..db112fa 100644
33209 --- a/drivers/net/usb/hso.c
33210 +++ b/drivers/net/usb/hso.c
33211 @@ -71,7 +71,7 @@
33212 #include <asm/byteorder.h>
33213 #include <linux/serial_core.h>
33214 #include <linux/serial.h>
33215 -
33216 +#include <asm/local.h>
33217
33218 #define MOD_AUTHOR "Option Wireless"
33219 #define MOD_DESCRIPTION "USB High Speed Option driver"
33220 @@ -257,7 +257,7 @@ struct hso_serial {
33221
33222 /* from usb_serial_port */
33223 struct tty_struct *tty;
33224 - int open_count;
33225 + local_t open_count;
33226 spinlock_t serial_lock;
33227
33228 int (*write_data) (struct hso_serial *serial);
33229 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33230 struct urb *urb;
33231
33232 urb = serial->rx_urb[0];
33233 - if (serial->open_count > 0) {
33234 + if (local_read(&serial->open_count) > 0) {
33235 count = put_rxbuf_data(urb, serial);
33236 if (count == -1)
33237 return;
33238 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33239 DUMP1(urb->transfer_buffer, urb->actual_length);
33240
33241 /* Anyone listening? */
33242 - if (serial->open_count == 0)
33243 + if (local_read(&serial->open_count) == 0)
33244 return;
33245
33246 if (status == 0) {
33247 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33248 spin_unlock_irq(&serial->serial_lock);
33249
33250 /* check for port already opened, if not set the termios */
33251 - serial->open_count++;
33252 - if (serial->open_count == 1) {
33253 + if (local_inc_return(&serial->open_count) == 1) {
33254 serial->rx_state = RX_IDLE;
33255 /* Force default termio settings */
33256 _hso_serial_set_termios(tty, NULL);
33257 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33258 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33259 if (result) {
33260 hso_stop_serial_device(serial->parent);
33261 - serial->open_count--;
33262 + local_dec(&serial->open_count);
33263 kref_put(&serial->parent->ref, hso_serial_ref_free);
33264 }
33265 } else {
33266 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33267
33268 /* reset the rts and dtr */
33269 /* do the actual close */
33270 - serial->open_count--;
33271 + local_dec(&serial->open_count);
33272
33273 - if (serial->open_count <= 0) {
33274 - serial->open_count = 0;
33275 + if (local_read(&serial->open_count) <= 0) {
33276 + local_set(&serial->open_count, 0);
33277 spin_lock_irq(&serial->serial_lock);
33278 if (serial->tty == tty) {
33279 serial->tty->driver_data = NULL;
33280 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33281
33282 /* the actual setup */
33283 spin_lock_irqsave(&serial->serial_lock, flags);
33284 - if (serial->open_count)
33285 + if (local_read(&serial->open_count))
33286 _hso_serial_set_termios(tty, old);
33287 else
33288 tty->termios = old;
33289 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33290 D1("Pending read interrupt on port %d\n", i);
33291 spin_lock(&serial->serial_lock);
33292 if (serial->rx_state == RX_IDLE &&
33293 - serial->open_count > 0) {
33294 + local_read(&serial->open_count) > 0) {
33295 /* Setup and send a ctrl req read on
33296 * port i */
33297 if (!serial->rx_urb_filled[0]) {
33298 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33299 /* Start all serial ports */
33300 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33301 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33302 - if (dev2ser(serial_table[i])->open_count) {
33303 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33304 result =
33305 hso_start_serial_device(serial_table[i], GFP_NOIO);
33306 hso_kick_transmit(dev2ser(serial_table[i]));
33307 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33308 index e662cbc..8d4a102 100644
33309 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33310 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33311 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33312 * Return with error code if any of the queue indices
33313 * is out of range
33314 */
33315 - if (p->ring_index[i] < 0 ||
33316 - p->ring_index[i] >= adapter->num_rx_queues)
33317 + if (p->ring_index[i] >= adapter->num_rx_queues)
33318 return -EINVAL;
33319 }
33320
33321 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33322 index 0f9ee46..e2d6e65 100644
33323 --- a/drivers/net/wireless/ath/ath.h
33324 +++ b/drivers/net/wireless/ath/ath.h
33325 @@ -119,6 +119,7 @@ struct ath_ops {
33326 void (*write_flush) (void *);
33327 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33328 };
33329 +typedef struct ath_ops __no_const ath_ops_no_const;
33330
33331 struct ath_common;
33332 struct ath_bus_ops;
33333 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33334 index b592016..fe47870 100644
33335 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33336 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33337 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33338 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33339 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33340
33341 - ACCESS_ONCE(ads->ds_link) = i->link;
33342 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33343 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33344 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33345
33346 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33347 ctl6 = SM(i->keytype, AR_EncrType);
33348 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33349
33350 if ((i->is_first || i->is_last) &&
33351 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33352 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33353 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33354 | set11nTries(i->rates, 1)
33355 | set11nTries(i->rates, 2)
33356 | set11nTries(i->rates, 3)
33357 | (i->dur_update ? AR_DurUpdateEna : 0)
33358 | SM(0, AR_BurstDur);
33359
33360 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33361 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33362 | set11nRate(i->rates, 1)
33363 | set11nRate(i->rates, 2)
33364 | set11nRate(i->rates, 3);
33365 } else {
33366 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33367 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33368 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33369 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33370 }
33371
33372 if (!i->is_first) {
33373 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33374 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33375 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33376 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33377 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33378 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33379 return;
33380 }
33381
33382 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33383 break;
33384 }
33385
33386 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33387 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33388 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33389 | SM(i->txpower, AR_XmitPower)
33390 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33391 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33392 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33393 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33394
33395 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33396 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33397 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33398 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33399
33400 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33401 return;
33402
33403 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33404 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33405 | set11nPktDurRTSCTS(i->rates, 1);
33406
33407 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33408 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33409 | set11nPktDurRTSCTS(i->rates, 3);
33410
33411 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33412 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33413 | set11nRateFlags(i->rates, 1)
33414 | set11nRateFlags(i->rates, 2)
33415 | set11nRateFlags(i->rates, 3)
33416 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33417 index f5ae3c6..7936af3 100644
33418 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33419 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33420 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33421 (i->qcu << AR_TxQcuNum_S) | 0x17;
33422
33423 checksum += val;
33424 - ACCESS_ONCE(ads->info) = val;
33425 + ACCESS_ONCE_RW(ads->info) = val;
33426
33427 checksum += i->link;
33428 - ACCESS_ONCE(ads->link) = i->link;
33429 + ACCESS_ONCE_RW(ads->link) = i->link;
33430
33431 checksum += i->buf_addr[0];
33432 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33433 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33434 checksum += i->buf_addr[1];
33435 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33436 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33437 checksum += i->buf_addr[2];
33438 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33439 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33440 checksum += i->buf_addr[3];
33441 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33442 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33443
33444 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33445 - ACCESS_ONCE(ads->ctl3) = val;
33446 + ACCESS_ONCE_RW(ads->ctl3) = val;
33447 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33448 - ACCESS_ONCE(ads->ctl5) = val;
33449 + ACCESS_ONCE_RW(ads->ctl5) = val;
33450 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33451 - ACCESS_ONCE(ads->ctl7) = val;
33452 + ACCESS_ONCE_RW(ads->ctl7) = val;
33453 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33454 - ACCESS_ONCE(ads->ctl9) = val;
33455 + ACCESS_ONCE_RW(ads->ctl9) = val;
33456
33457 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33458 - ACCESS_ONCE(ads->ctl10) = checksum;
33459 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33460
33461 if (i->is_first || i->is_last) {
33462 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33463 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33464 | set11nTries(i->rates, 1)
33465 | set11nTries(i->rates, 2)
33466 | set11nTries(i->rates, 3)
33467 | (i->dur_update ? AR_DurUpdateEna : 0)
33468 | SM(0, AR_BurstDur);
33469
33470 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33471 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33472 | set11nRate(i->rates, 1)
33473 | set11nRate(i->rates, 2)
33474 | set11nRate(i->rates, 3);
33475 } else {
33476 - ACCESS_ONCE(ads->ctl13) = 0;
33477 - ACCESS_ONCE(ads->ctl14) = 0;
33478 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33479 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33480 }
33481
33482 ads->ctl20 = 0;
33483 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33484
33485 ctl17 = SM(i->keytype, AR_EncrType);
33486 if (!i->is_first) {
33487 - ACCESS_ONCE(ads->ctl11) = 0;
33488 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33489 - ACCESS_ONCE(ads->ctl15) = 0;
33490 - ACCESS_ONCE(ads->ctl16) = 0;
33491 - ACCESS_ONCE(ads->ctl17) = ctl17;
33492 - ACCESS_ONCE(ads->ctl18) = 0;
33493 - ACCESS_ONCE(ads->ctl19) = 0;
33494 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33495 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33496 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33497 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33498 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33499 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33500 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33501 return;
33502 }
33503
33504 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33505 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33506 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33507 | SM(i->txpower, AR_XmitPower)
33508 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33509 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33510 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33511 ctl12 |= SM(val, AR_PAPRDChainMask);
33512
33513 - ACCESS_ONCE(ads->ctl12) = ctl12;
33514 - ACCESS_ONCE(ads->ctl17) = ctl17;
33515 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33516 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33517
33518 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33519 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33520 | set11nPktDurRTSCTS(i->rates, 1);
33521
33522 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33523 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33524 | set11nPktDurRTSCTS(i->rates, 3);
33525
33526 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33527 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33528 | set11nRateFlags(i->rates, 1)
33529 | set11nRateFlags(i->rates, 2)
33530 | set11nRateFlags(i->rates, 3)
33531 | SM(i->rtscts_rate, AR_RTSCTSRate);
33532
33533 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33534 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33535 }
33536
33537 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33538 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33539 index f389b3c..7359e18 100644
33540 --- a/drivers/net/wireless/ath/ath9k/hw.h
33541 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33542 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33543
33544 /* ANI */
33545 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33546 -};
33547 +} __no_const;
33548
33549 /**
33550 * struct ath_hw_ops - callbacks used by hardware code and driver code
33551 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33552 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33553 struct ath_hw_antcomb_conf *antconf);
33554
33555 -};
33556 +} __no_const;
33557
33558 struct ath_nf_limits {
33559 s16 max;
33560 @@ -655,7 +655,7 @@ enum ath_cal_list {
33561 #define AH_FASTCC 0x4
33562
33563 struct ath_hw {
33564 - struct ath_ops reg_ops;
33565 + ath_ops_no_const reg_ops;
33566
33567 struct ieee80211_hw *hw;
33568 struct ath_common common;
33569 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33570 index bea8524..c677c06 100644
33571 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33572 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33573 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33574 void (*carrsuppr)(struct brcms_phy *);
33575 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33576 void (*detach)(struct brcms_phy *);
33577 -};
33578 +} __no_const;
33579
33580 struct brcms_phy {
33581 struct brcms_phy_pub pubpi_ro;
33582 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33583 index 05f2ad1..ae00eea 100644
33584 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33585 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33586 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33587 */
33588 if (iwl3945_mod_params.disable_hw_scan) {
33589 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33590 - iwl3945_hw_ops.hw_scan = NULL;
33591 + pax_open_kernel();
33592 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33593 + pax_close_kernel();
33594 }
33595
33596 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33597 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33598 index 69a77e2..552b42c 100644
33599 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33600 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33601 @@ -71,8 +71,8 @@ do { \
33602 } while (0)
33603
33604 #else
33605 -#define IWL_DEBUG(m, level, fmt, args...)
33606 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33607 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33608 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33609 #define iwl_print_hex_dump(m, level, p, len)
33610 #endif /* CONFIG_IWLWIFI_DEBUG */
33611
33612 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33613 index 523ad55..f8c5dc5 100644
33614 --- a/drivers/net/wireless/mac80211_hwsim.c
33615 +++ b/drivers/net/wireless/mac80211_hwsim.c
33616 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33617 return -EINVAL;
33618
33619 if (fake_hw_scan) {
33620 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33621 - mac80211_hwsim_ops.sw_scan_start = NULL;
33622 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33623 + pax_open_kernel();
33624 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33625 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33626 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33627 + pax_close_kernel();
33628 }
33629
33630 spin_lock_init(&hwsim_radio_lock);
33631 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33632 index 30f138b..c904585 100644
33633 --- a/drivers/net/wireless/mwifiex/main.h
33634 +++ b/drivers/net/wireless/mwifiex/main.h
33635 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33636 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33637 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33638 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33639 -};
33640 +} __no_const;
33641
33642 struct mwifiex_adapter {
33643 u8 iface_type;
33644 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33645 index 0c13840..a5c3ed6 100644
33646 --- a/drivers/net/wireless/rndis_wlan.c
33647 +++ b/drivers/net/wireless/rndis_wlan.c
33648 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33649
33650 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33651
33652 - if (rts_threshold < 0 || rts_threshold > 2347)
33653 + if (rts_threshold > 2347)
33654 rts_threshold = 2347;
33655
33656 tmp = cpu_to_le32(rts_threshold);
33657 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33658 index a77f1bb..c608b2b 100644
33659 --- a/drivers/net/wireless/wl1251/wl1251.h
33660 +++ b/drivers/net/wireless/wl1251/wl1251.h
33661 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33662 void (*reset)(struct wl1251 *wl);
33663 void (*enable_irq)(struct wl1251 *wl);
33664 void (*disable_irq)(struct wl1251 *wl);
33665 -};
33666 +} __no_const;
33667
33668 struct wl1251 {
33669 struct ieee80211_hw *hw;
33670 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33671 index f34b5b2..b5abb9f 100644
33672 --- a/drivers/oprofile/buffer_sync.c
33673 +++ b/drivers/oprofile/buffer_sync.c
33674 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33675 if (cookie == NO_COOKIE)
33676 offset = pc;
33677 if (cookie == INVALID_COOKIE) {
33678 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33679 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33680 offset = pc;
33681 }
33682 if (cookie != last_cookie) {
33683 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33684 /* add userspace sample */
33685
33686 if (!mm) {
33687 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33688 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33689 return 0;
33690 }
33691
33692 cookie = lookup_dcookie(mm, s->eip, &offset);
33693
33694 if (cookie == INVALID_COOKIE) {
33695 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33696 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33697 return 0;
33698 }
33699
33700 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33701 /* ignore backtraces if failed to add a sample */
33702 if (state == sb_bt_start) {
33703 state = sb_bt_ignore;
33704 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33705 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33706 }
33707 }
33708 release_mm(mm);
33709 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33710 index c0cc4e7..44d4e54 100644
33711 --- a/drivers/oprofile/event_buffer.c
33712 +++ b/drivers/oprofile/event_buffer.c
33713 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33714 }
33715
33716 if (buffer_pos == buffer_size) {
33717 - atomic_inc(&oprofile_stats.event_lost_overflow);
33718 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33719 return;
33720 }
33721
33722 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33723 index f8c752e..28bf4fc 100644
33724 --- a/drivers/oprofile/oprof.c
33725 +++ b/drivers/oprofile/oprof.c
33726 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33727 if (oprofile_ops.switch_events())
33728 return;
33729
33730 - atomic_inc(&oprofile_stats.multiplex_counter);
33731 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33732 start_switch_worker();
33733 }
33734
33735 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33736 index 917d28e..d62d981 100644
33737 --- a/drivers/oprofile/oprofile_stats.c
33738 +++ b/drivers/oprofile/oprofile_stats.c
33739 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33740 cpu_buf->sample_invalid_eip = 0;
33741 }
33742
33743 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33744 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33745 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33746 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33747 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33748 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33749 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33750 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33751 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33752 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33753 }
33754
33755
33756 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33757 index 38b6fc0..b5cbfce 100644
33758 --- a/drivers/oprofile/oprofile_stats.h
33759 +++ b/drivers/oprofile/oprofile_stats.h
33760 @@ -13,11 +13,11 @@
33761 #include <linux/atomic.h>
33762
33763 struct oprofile_stat_struct {
33764 - atomic_t sample_lost_no_mm;
33765 - atomic_t sample_lost_no_mapping;
33766 - atomic_t bt_lost_no_mapping;
33767 - atomic_t event_lost_overflow;
33768 - atomic_t multiplex_counter;
33769 + atomic_unchecked_t sample_lost_no_mm;
33770 + atomic_unchecked_t sample_lost_no_mapping;
33771 + atomic_unchecked_t bt_lost_no_mapping;
33772 + atomic_unchecked_t event_lost_overflow;
33773 + atomic_unchecked_t multiplex_counter;
33774 };
33775
33776 extern struct oprofile_stat_struct oprofile_stats;
33777 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33778 index 2f0aa0f..90fab02 100644
33779 --- a/drivers/oprofile/oprofilefs.c
33780 +++ b/drivers/oprofile/oprofilefs.c
33781 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33782
33783
33784 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33785 - char const *name, atomic_t *val)
33786 + char const *name, atomic_unchecked_t *val)
33787 {
33788 return __oprofilefs_create_file(sb, root, name,
33789 &atomic_ro_fops, 0444, val);
33790 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33791 index 3f56bc0..707d642 100644
33792 --- a/drivers/parport/procfs.c
33793 +++ b/drivers/parport/procfs.c
33794 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33795
33796 *ppos += len;
33797
33798 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33799 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33800 }
33801
33802 #ifdef CONFIG_PARPORT_1284
33803 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33804
33805 *ppos += len;
33806
33807 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33808 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33809 }
33810 #endif /* IEEE1284.3 support. */
33811
33812 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33813 index 9fff878..ad0ad53 100644
33814 --- a/drivers/pci/hotplug/cpci_hotplug.h
33815 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33816 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33817 int (*hardware_test) (struct slot* slot, u32 value);
33818 u8 (*get_power) (struct slot* slot);
33819 int (*set_power) (struct slot* slot, int value);
33820 -};
33821 +} __no_const;
33822
33823 struct cpci_hp_controller {
33824 unsigned int irq;
33825 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33826 index 76ba8a1..20ca857 100644
33827 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33828 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33829 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33830
33831 void compaq_nvram_init (void __iomem *rom_start)
33832 {
33833 +
33834 +#ifndef CONFIG_PAX_KERNEXEC
33835 if (rom_start) {
33836 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33837 }
33838 +#endif
33839 +
33840 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33841
33842 /* initialize our int15 lock */
33843 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33844 index 1cfbf22..be96487 100644
33845 --- a/drivers/pci/pcie/aspm.c
33846 +++ b/drivers/pci/pcie/aspm.c
33847 @@ -27,9 +27,9 @@
33848 #define MODULE_PARAM_PREFIX "pcie_aspm."
33849
33850 /* Note: those are not register definitions */
33851 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33852 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33853 -#define ASPM_STATE_L1 (4) /* L1 state */
33854 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33855 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33856 +#define ASPM_STATE_L1 (4U) /* L1 state */
33857 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33858 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33859
33860 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33861 index 04e74f4..a960176 100644
33862 --- a/drivers/pci/probe.c
33863 +++ b/drivers/pci/probe.c
33864 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33865 u32 l, sz, mask;
33866 u16 orig_cmd;
33867
33868 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33869 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33870
33871 if (!dev->mmio_always_on) {
33872 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33873 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33874 index 27911b5..5b6db88 100644
33875 --- a/drivers/pci/proc.c
33876 +++ b/drivers/pci/proc.c
33877 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33878 static int __init pci_proc_init(void)
33879 {
33880 struct pci_dev *dev = NULL;
33881 +
33882 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33883 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33884 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33885 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33886 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33887 +#endif
33888 +#else
33889 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33890 +#endif
33891 proc_create("devices", 0, proc_bus_pci_dir,
33892 &proc_bus_pci_dev_operations);
33893 proc_initialized = 1;
33894 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33895 index 7b82868..b9344c9 100644
33896 --- a/drivers/platform/x86/thinkpad_acpi.c
33897 +++ b/drivers/platform/x86/thinkpad_acpi.c
33898 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33899 return 0;
33900 }
33901
33902 -void static hotkey_mask_warn_incomplete_mask(void)
33903 +static void hotkey_mask_warn_incomplete_mask(void)
33904 {
33905 /* log only what the user can fix... */
33906 const u32 wantedmask = hotkey_driver_mask &
33907 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33908 }
33909 }
33910
33911 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33912 - struct tp_nvram_state *newn,
33913 - const u32 event_mask)
33914 -{
33915 -
33916 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33917 do { \
33918 if ((event_mask & (1 << __scancode)) && \
33919 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33920 tpacpi_hotkey_send_key(__scancode); \
33921 } while (0)
33922
33923 - void issue_volchange(const unsigned int oldvol,
33924 - const unsigned int newvol)
33925 - {
33926 - unsigned int i = oldvol;
33927 +static void issue_volchange(const unsigned int oldvol,
33928 + const unsigned int newvol,
33929 + const u32 event_mask)
33930 +{
33931 + unsigned int i = oldvol;
33932
33933 - while (i > newvol) {
33934 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33935 - i--;
33936 - }
33937 - while (i < newvol) {
33938 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33939 - i++;
33940 - }
33941 + while (i > newvol) {
33942 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33943 + i--;
33944 }
33945 + while (i < newvol) {
33946 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33947 + i++;
33948 + }
33949 +}
33950
33951 - void issue_brightnesschange(const unsigned int oldbrt,
33952 - const unsigned int newbrt)
33953 - {
33954 - unsigned int i = oldbrt;
33955 +static void issue_brightnesschange(const unsigned int oldbrt,
33956 + const unsigned int newbrt,
33957 + const u32 event_mask)
33958 +{
33959 + unsigned int i = oldbrt;
33960
33961 - while (i > newbrt) {
33962 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33963 - i--;
33964 - }
33965 - while (i < newbrt) {
33966 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33967 - i++;
33968 - }
33969 + while (i > newbrt) {
33970 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33971 + i--;
33972 + }
33973 + while (i < newbrt) {
33974 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33975 + i++;
33976 }
33977 +}
33978
33979 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33980 + struct tp_nvram_state *newn,
33981 + const u32 event_mask)
33982 +{
33983 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33984 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33985 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33986 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33987 oldn->volume_level != newn->volume_level) {
33988 /* recently muted, or repeated mute keypress, or
33989 * multiple presses ending in mute */
33990 - issue_volchange(oldn->volume_level, newn->volume_level);
33991 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33992 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33993 }
33994 } else {
33995 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33996 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33997 }
33998 if (oldn->volume_level != newn->volume_level) {
33999 - issue_volchange(oldn->volume_level, newn->volume_level);
34000 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
34001 } else if (oldn->volume_toggle != newn->volume_toggle) {
34002 /* repeated vol up/down keypress at end of scale ? */
34003 if (newn->volume_level == 0)
34004 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34005 /* handle brightness */
34006 if (oldn->brightness_level != newn->brightness_level) {
34007 issue_brightnesschange(oldn->brightness_level,
34008 - newn->brightness_level);
34009 + newn->brightness_level,
34010 + event_mask);
34011 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
34012 /* repeated key presses that didn't change state */
34013 if (newn->brightness_level == 0)
34014 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34015 && !tp_features.bright_unkfw)
34016 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34017 }
34018 +}
34019
34020 #undef TPACPI_COMPARE_KEY
34021 #undef TPACPI_MAY_SEND_KEY
34022 -}
34023
34024 /*
34025 * Polling driver
34026 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
34027 index b859d16..5cc6b1a 100644
34028 --- a/drivers/pnp/pnpbios/bioscalls.c
34029 +++ b/drivers/pnp/pnpbios/bioscalls.c
34030 @@ -59,7 +59,7 @@ do { \
34031 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
34032 } while(0)
34033
34034 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
34035 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
34036 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
34037
34038 /*
34039 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34040
34041 cpu = get_cpu();
34042 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
34043 +
34044 + pax_open_kernel();
34045 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
34046 + pax_close_kernel();
34047
34048 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
34049 spin_lock_irqsave(&pnp_bios_lock, flags);
34050 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34051 :"memory");
34052 spin_unlock_irqrestore(&pnp_bios_lock, flags);
34053
34054 + pax_open_kernel();
34055 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
34056 + pax_close_kernel();
34057 +
34058 put_cpu();
34059
34060 /* If we get here and this is set then the PnP BIOS faulted on us. */
34061 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
34062 return status;
34063 }
34064
34065 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
34066 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
34067 {
34068 int i;
34069
34070 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34071 pnp_bios_callpoint.offset = header->fields.pm16offset;
34072 pnp_bios_callpoint.segment = PNP_CS16;
34073
34074 + pax_open_kernel();
34075 +
34076 for_each_possible_cpu(i) {
34077 struct desc_struct *gdt = get_cpu_gdt_table(i);
34078 if (!gdt)
34079 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34080 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
34081 (unsigned long)__va(header->fields.pm16dseg));
34082 }
34083 +
34084 + pax_close_kernel();
34085 }
34086 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
34087 index b0ecacb..7c9da2e 100644
34088 --- a/drivers/pnp/resource.c
34089 +++ b/drivers/pnp/resource.c
34090 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
34091 return 1;
34092
34093 /* check if the resource is valid */
34094 - if (*irq < 0 || *irq > 15)
34095 + if (*irq > 15)
34096 return 0;
34097
34098 /* check if the resource is reserved */
34099 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
34100 return 1;
34101
34102 /* check if the resource is valid */
34103 - if (*dma < 0 || *dma == 4 || *dma > 7)
34104 + if (*dma == 4 || *dma > 7)
34105 return 0;
34106
34107 /* check if the resource is reserved */
34108 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
34109 index bb16f5b..c751eef 100644
34110 --- a/drivers/power/bq27x00_battery.c
34111 +++ b/drivers/power/bq27x00_battery.c
34112 @@ -67,7 +67,7 @@
34113 struct bq27x00_device_info;
34114 struct bq27x00_access_methods {
34115 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
34116 -};
34117 +} __no_const;
34118
34119 enum bq27x00_chip { BQ27000, BQ27500 };
34120
34121 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
34122 index 33f5d9a..d957d3f 100644
34123 --- a/drivers/regulator/max8660.c
34124 +++ b/drivers/regulator/max8660.c
34125 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
34126 max8660->shadow_regs[MAX8660_OVER1] = 5;
34127 } else {
34128 /* Otherwise devices can be toggled via software */
34129 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
34130 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
34131 + pax_open_kernel();
34132 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
34133 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
34134 + pax_close_kernel();
34135 }
34136
34137 /*
34138 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
34139 index 023d17d..74ef35b 100644
34140 --- a/drivers/regulator/mc13892-regulator.c
34141 +++ b/drivers/regulator/mc13892-regulator.c
34142 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
34143 }
34144 mc13xxx_unlock(mc13892);
34145
34146 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34147 + pax_open_kernel();
34148 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34149 = mc13892_vcam_set_mode;
34150 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34151 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34152 = mc13892_vcam_get_mode;
34153 + pax_close_kernel();
34154 for (i = 0; i < pdata->num_regulators; i++) {
34155 init_data = &pdata->regulators[i];
34156 priv->regulators[i] = regulator_register(
34157 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
34158 index cace6d3..f623fda 100644
34159 --- a/drivers/rtc/rtc-dev.c
34160 +++ b/drivers/rtc/rtc-dev.c
34161 @@ -14,6 +14,7 @@
34162 #include <linux/module.h>
34163 #include <linux/rtc.h>
34164 #include <linux/sched.h>
34165 +#include <linux/grsecurity.h>
34166 #include "rtc-core.h"
34167
34168 static dev_t rtc_devt;
34169 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
34170 if (copy_from_user(&tm, uarg, sizeof(tm)))
34171 return -EFAULT;
34172
34173 + gr_log_timechange();
34174 +
34175 return rtc_set_time(rtc, &tm);
34176
34177 case RTC_PIE_ON:
34178 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
34179 index ffb5878..e6d785c 100644
34180 --- a/drivers/scsi/aacraid/aacraid.h
34181 +++ b/drivers/scsi/aacraid/aacraid.h
34182 @@ -492,7 +492,7 @@ struct adapter_ops
34183 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
34184 /* Administrative operations */
34185 int (*adapter_comm)(struct aac_dev * dev, int comm);
34186 -};
34187 +} __no_const;
34188
34189 /*
34190 * Define which interrupt handler needs to be installed
34191 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
34192 index 705e13e..91c873c 100644
34193 --- a/drivers/scsi/aacraid/linit.c
34194 +++ b/drivers/scsi/aacraid/linit.c
34195 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
34196 #elif defined(__devinitconst)
34197 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34198 #else
34199 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34200 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34201 #endif
34202 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34203 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34204 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34205 index d5ff142..49c0ebb 100644
34206 --- a/drivers/scsi/aic94xx/aic94xx_init.c
34207 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
34208 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34209 .lldd_control_phy = asd_control_phy,
34210 };
34211
34212 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34213 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34214 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34215 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34216 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34217 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34218 index a796de9..1ef20e1 100644
34219 --- a/drivers/scsi/bfa/bfa.h
34220 +++ b/drivers/scsi/bfa/bfa.h
34221 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
34222 u32 *end);
34223 int cpe_vec_q0;
34224 int rme_vec_q0;
34225 -};
34226 +} __no_const;
34227 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34228
34229 struct bfa_faa_cbfn_s {
34230 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34231 index e07bd47..cd1bbbb 100644
34232 --- a/drivers/scsi/bfa/bfa_fcpim.c
34233 +++ b/drivers/scsi/bfa/bfa_fcpim.c
34234 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34235
34236 bfa_iotag_attach(fcp);
34237
34238 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34239 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34240 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34241 (fcp->num_itns * sizeof(struct bfa_itn_s));
34242 memset(fcp->itn_arr, 0,
34243 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34244 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34245 {
34246 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34247 - struct bfa_itn_s *itn;
34248 + bfa_itn_s_no_const *itn;
34249
34250 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34251 itn->isr = isr;
34252 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34253 index 1080bcb..a3b39e3 100644
34254 --- a/drivers/scsi/bfa/bfa_fcpim.h
34255 +++ b/drivers/scsi/bfa/bfa_fcpim.h
34256 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
34257 struct bfa_itn_s {
34258 bfa_isr_func_t isr;
34259 };
34260 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34261
34262 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34263 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34264 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34265 struct list_head iotag_tio_free_q; /* free IO resources */
34266 struct list_head iotag_unused_q; /* unused IO resources*/
34267 struct bfa_iotag_s *iotag_arr;
34268 - struct bfa_itn_s *itn_arr;
34269 + bfa_itn_s_no_const *itn_arr;
34270 int num_ioim_reqs;
34271 int num_fwtio_reqs;
34272 int num_itns;
34273 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34274 index 546d46b..642fa5b 100644
34275 --- a/drivers/scsi/bfa/bfa_ioc.h
34276 +++ b/drivers/scsi/bfa/bfa_ioc.h
34277 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34278 bfa_ioc_disable_cbfn_t disable_cbfn;
34279 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34280 bfa_ioc_reset_cbfn_t reset_cbfn;
34281 -};
34282 +} __no_const;
34283
34284 /*
34285 * IOC event notification mechanism.
34286 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34287 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34288 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34289 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34290 -};
34291 +} __no_const;
34292
34293 /*
34294 * Queue element to wait for room in request queue. FIFO order is
34295 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34296 index 351dc0b..951dc32 100644
34297 --- a/drivers/scsi/hosts.c
34298 +++ b/drivers/scsi/hosts.c
34299 @@ -42,7 +42,7 @@
34300 #include "scsi_logging.h"
34301
34302
34303 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34304 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34305
34306
34307 static void scsi_host_cls_release(struct device *dev)
34308 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34309 * subtract one because we increment first then return, but we need to
34310 * know what the next host number was before increment
34311 */
34312 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34313 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34314 shost->dma_channel = 0xff;
34315
34316 /* These three are default values which can be overridden */
34317 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34318 index 865d452..e9b7fa7 100644
34319 --- a/drivers/scsi/hpsa.c
34320 +++ b/drivers/scsi/hpsa.c
34321 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34322 u32 a;
34323
34324 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34325 - return h->access.command_completed(h);
34326 + return h->access->command_completed(h);
34327
34328 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34329 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34330 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34331 while (!list_empty(&h->reqQ)) {
34332 c = list_entry(h->reqQ.next, struct CommandList, list);
34333 /* can't do anything if fifo is full */
34334 - if ((h->access.fifo_full(h))) {
34335 + if ((h->access->fifo_full(h))) {
34336 dev_warn(&h->pdev->dev, "fifo full\n");
34337 break;
34338 }
34339 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34340 h->Qdepth--;
34341
34342 /* Tell the controller execute command */
34343 - h->access.submit_command(h, c);
34344 + h->access->submit_command(h, c);
34345
34346 /* Put job onto the completed Q */
34347 addQ(&h->cmpQ, c);
34348 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34349
34350 static inline unsigned long get_next_completion(struct ctlr_info *h)
34351 {
34352 - return h->access.command_completed(h);
34353 + return h->access->command_completed(h);
34354 }
34355
34356 static inline bool interrupt_pending(struct ctlr_info *h)
34357 {
34358 - return h->access.intr_pending(h);
34359 + return h->access->intr_pending(h);
34360 }
34361
34362 static inline long interrupt_not_for_us(struct ctlr_info *h)
34363 {
34364 - return (h->access.intr_pending(h) == 0) ||
34365 + return (h->access->intr_pending(h) == 0) ||
34366 (h->interrupts_enabled == 0);
34367 }
34368
34369 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34370 if (prod_index < 0)
34371 return -ENODEV;
34372 h->product_name = products[prod_index].product_name;
34373 - h->access = *(products[prod_index].access);
34374 + h->access = products[prod_index].access;
34375
34376 if (hpsa_board_disabled(h->pdev)) {
34377 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34378 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34379
34380 assert_spin_locked(&lockup_detector_lock);
34381 remove_ctlr_from_lockup_detector_list(h);
34382 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34383 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34384 spin_lock_irqsave(&h->lock, flags);
34385 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34386 spin_unlock_irqrestore(&h->lock, flags);
34387 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34388 }
34389
34390 /* make sure the board interrupts are off */
34391 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34392 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34393
34394 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34395 goto clean2;
34396 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34397 * fake ones to scoop up any residual completions.
34398 */
34399 spin_lock_irqsave(&h->lock, flags);
34400 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34401 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34402 spin_unlock_irqrestore(&h->lock, flags);
34403 free_irq(h->intr[h->intr_mode], h);
34404 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34405 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34406 dev_info(&h->pdev->dev, "Board READY.\n");
34407 dev_info(&h->pdev->dev,
34408 "Waiting for stale completions to drain.\n");
34409 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34410 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34411 msleep(10000);
34412 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34413 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34414
34415 rc = controller_reset_failed(h->cfgtable);
34416 if (rc)
34417 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34418 }
34419
34420 /* Turn the interrupts on so we can service requests */
34421 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34422 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34423
34424 hpsa_hba_inquiry(h);
34425 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34426 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34427 * To write all data in the battery backed cache to disks
34428 */
34429 hpsa_flush_cache(h);
34430 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34431 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34432 free_irq(h->intr[h->intr_mode], h);
34433 #ifdef CONFIG_PCI_MSI
34434 if (h->msix_vector)
34435 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34436 return;
34437 }
34438 /* Change the access methods to the performant access methods */
34439 - h->access = SA5_performant_access;
34440 + h->access = &SA5_performant_access;
34441 h->transMethod = CFGTBL_Trans_Performant;
34442 }
34443
34444 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34445 index 91edafb..a9b88ec 100644
34446 --- a/drivers/scsi/hpsa.h
34447 +++ b/drivers/scsi/hpsa.h
34448 @@ -73,7 +73,7 @@ struct ctlr_info {
34449 unsigned int msix_vector;
34450 unsigned int msi_vector;
34451 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34452 - struct access_method access;
34453 + struct access_method *access;
34454
34455 /* queue and queue Info */
34456 struct list_head reqQ;
34457 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34458 index f2df059..a3a9930 100644
34459 --- a/drivers/scsi/ips.h
34460 +++ b/drivers/scsi/ips.h
34461 @@ -1027,7 +1027,7 @@ typedef struct {
34462 int (*intr)(struct ips_ha *);
34463 void (*enableint)(struct ips_ha *);
34464 uint32_t (*statupd)(struct ips_ha *);
34465 -} ips_hw_func_t;
34466 +} __no_const ips_hw_func_t;
34467
34468 typedef struct ips_ha {
34469 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34470 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34471 index 9de9db2..1e09660 100644
34472 --- a/drivers/scsi/libfc/fc_exch.c
34473 +++ b/drivers/scsi/libfc/fc_exch.c
34474 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34475 * all together if not used XXX
34476 */
34477 struct {
34478 - atomic_t no_free_exch;
34479 - atomic_t no_free_exch_xid;
34480 - atomic_t xid_not_found;
34481 - atomic_t xid_busy;
34482 - atomic_t seq_not_found;
34483 - atomic_t non_bls_resp;
34484 + atomic_unchecked_t no_free_exch;
34485 + atomic_unchecked_t no_free_exch_xid;
34486 + atomic_unchecked_t xid_not_found;
34487 + atomic_unchecked_t xid_busy;
34488 + atomic_unchecked_t seq_not_found;
34489 + atomic_unchecked_t non_bls_resp;
34490 } stats;
34491 };
34492
34493 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34494 /* allocate memory for exchange */
34495 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34496 if (!ep) {
34497 - atomic_inc(&mp->stats.no_free_exch);
34498 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34499 goto out;
34500 }
34501 memset(ep, 0, sizeof(*ep));
34502 @@ -780,7 +780,7 @@ out:
34503 return ep;
34504 err:
34505 spin_unlock_bh(&pool->lock);
34506 - atomic_inc(&mp->stats.no_free_exch_xid);
34507 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34508 mempool_free(ep, mp->ep_pool);
34509 return NULL;
34510 }
34511 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34512 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34513 ep = fc_exch_find(mp, xid);
34514 if (!ep) {
34515 - atomic_inc(&mp->stats.xid_not_found);
34516 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34517 reject = FC_RJT_OX_ID;
34518 goto out;
34519 }
34520 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34521 ep = fc_exch_find(mp, xid);
34522 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34523 if (ep) {
34524 - atomic_inc(&mp->stats.xid_busy);
34525 + atomic_inc_unchecked(&mp->stats.xid_busy);
34526 reject = FC_RJT_RX_ID;
34527 goto rel;
34528 }
34529 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34530 }
34531 xid = ep->xid; /* get our XID */
34532 } else if (!ep) {
34533 - atomic_inc(&mp->stats.xid_not_found);
34534 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34535 reject = FC_RJT_RX_ID; /* XID not found */
34536 goto out;
34537 }
34538 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34539 } else {
34540 sp = &ep->seq;
34541 if (sp->id != fh->fh_seq_id) {
34542 - atomic_inc(&mp->stats.seq_not_found);
34543 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34544 if (f_ctl & FC_FC_END_SEQ) {
34545 /*
34546 * Update sequence_id based on incoming last
34547 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34548
34549 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34550 if (!ep) {
34551 - atomic_inc(&mp->stats.xid_not_found);
34552 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34553 goto out;
34554 }
34555 if (ep->esb_stat & ESB_ST_COMPLETE) {
34556 - atomic_inc(&mp->stats.xid_not_found);
34557 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34558 goto rel;
34559 }
34560 if (ep->rxid == FC_XID_UNKNOWN)
34561 ep->rxid = ntohs(fh->fh_rx_id);
34562 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34563 - atomic_inc(&mp->stats.xid_not_found);
34564 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34565 goto rel;
34566 }
34567 if (ep->did != ntoh24(fh->fh_s_id) &&
34568 ep->did != FC_FID_FLOGI) {
34569 - atomic_inc(&mp->stats.xid_not_found);
34570 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34571 goto rel;
34572 }
34573 sof = fr_sof(fp);
34574 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34575 sp->ssb_stat |= SSB_ST_RESP;
34576 sp->id = fh->fh_seq_id;
34577 } else if (sp->id != fh->fh_seq_id) {
34578 - atomic_inc(&mp->stats.seq_not_found);
34579 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34580 goto rel;
34581 }
34582
34583 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34584 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34585
34586 if (!sp)
34587 - atomic_inc(&mp->stats.xid_not_found);
34588 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34589 else
34590 - atomic_inc(&mp->stats.non_bls_resp);
34591 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34592
34593 fc_frame_free(fp);
34594 }
34595 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34596 index db9238f..4378ed2 100644
34597 --- a/drivers/scsi/libsas/sas_ata.c
34598 +++ b/drivers/scsi/libsas/sas_ata.c
34599 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34600 .postreset = ata_std_postreset,
34601 .error_handler = ata_std_error_handler,
34602 .post_internal_cmd = sas_ata_post_internal,
34603 - .qc_defer = ata_std_qc_defer,
34604 + .qc_defer = ata_std_qc_defer,
34605 .qc_prep = ata_noop_qc_prep,
34606 .qc_issue = sas_ata_qc_issue,
34607 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34608 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34609 index bb4c8e0..f33d849 100644
34610 --- a/drivers/scsi/lpfc/lpfc.h
34611 +++ b/drivers/scsi/lpfc/lpfc.h
34612 @@ -425,7 +425,7 @@ struct lpfc_vport {
34613 struct dentry *debug_nodelist;
34614 struct dentry *vport_debugfs_root;
34615 struct lpfc_debugfs_trc *disc_trc;
34616 - atomic_t disc_trc_cnt;
34617 + atomic_unchecked_t disc_trc_cnt;
34618 #endif
34619 uint8_t stat_data_enabled;
34620 uint8_t stat_data_blocked;
34621 @@ -835,8 +835,8 @@ struct lpfc_hba {
34622 struct timer_list fabric_block_timer;
34623 unsigned long bit_flags;
34624 #define FABRIC_COMANDS_BLOCKED 0
34625 - atomic_t num_rsrc_err;
34626 - atomic_t num_cmd_success;
34627 + atomic_unchecked_t num_rsrc_err;
34628 + atomic_unchecked_t num_cmd_success;
34629 unsigned long last_rsrc_error_time;
34630 unsigned long last_ramp_down_time;
34631 unsigned long last_ramp_up_time;
34632 @@ -866,7 +866,7 @@ struct lpfc_hba {
34633
34634 struct dentry *debug_slow_ring_trc;
34635 struct lpfc_debugfs_trc *slow_ring_trc;
34636 - atomic_t slow_ring_trc_cnt;
34637 + atomic_unchecked_t slow_ring_trc_cnt;
34638 /* iDiag debugfs sub-directory */
34639 struct dentry *idiag_root;
34640 struct dentry *idiag_pci_cfg;
34641 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34642 index 2838259..a07cfb5 100644
34643 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34644 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34645 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34646
34647 #include <linux/debugfs.h>
34648
34649 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34650 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34651 static unsigned long lpfc_debugfs_start_time = 0L;
34652
34653 /* iDiag */
34654 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34655 lpfc_debugfs_enable = 0;
34656
34657 len = 0;
34658 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34659 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34660 (lpfc_debugfs_max_disc_trc - 1);
34661 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34662 dtp = vport->disc_trc + i;
34663 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34664 lpfc_debugfs_enable = 0;
34665
34666 len = 0;
34667 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34668 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34669 (lpfc_debugfs_max_slow_ring_trc - 1);
34670 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34671 dtp = phba->slow_ring_trc + i;
34672 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34673 !vport || !vport->disc_trc)
34674 return;
34675
34676 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34677 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34678 (lpfc_debugfs_max_disc_trc - 1);
34679 dtp = vport->disc_trc + index;
34680 dtp->fmt = fmt;
34681 dtp->data1 = data1;
34682 dtp->data2 = data2;
34683 dtp->data3 = data3;
34684 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34685 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34686 dtp->jif = jiffies;
34687 #endif
34688 return;
34689 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34690 !phba || !phba->slow_ring_trc)
34691 return;
34692
34693 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34694 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34695 (lpfc_debugfs_max_slow_ring_trc - 1);
34696 dtp = phba->slow_ring_trc + index;
34697 dtp->fmt = fmt;
34698 dtp->data1 = data1;
34699 dtp->data2 = data2;
34700 dtp->data3 = data3;
34701 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34702 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34703 dtp->jif = jiffies;
34704 #endif
34705 return;
34706 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34707 "slow_ring buffer\n");
34708 goto debug_failed;
34709 }
34710 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34711 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34712 memset(phba->slow_ring_trc, 0,
34713 (sizeof(struct lpfc_debugfs_trc) *
34714 lpfc_debugfs_max_slow_ring_trc));
34715 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34716 "buffer\n");
34717 goto debug_failed;
34718 }
34719 - atomic_set(&vport->disc_trc_cnt, 0);
34720 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34721
34722 snprintf(name, sizeof(name), "discovery_trace");
34723 vport->debug_disc_trc =
34724 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34725 index 55bc4fc..a2a109c 100644
34726 --- a/drivers/scsi/lpfc/lpfc_init.c
34727 +++ b/drivers/scsi/lpfc/lpfc_init.c
34728 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34729 printk(LPFC_COPYRIGHT "\n");
34730
34731 if (lpfc_enable_npiv) {
34732 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34733 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34734 + pax_open_kernel();
34735 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34736 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34737 + pax_close_kernel();
34738 }
34739 lpfc_transport_template =
34740 fc_attach_transport(&lpfc_transport_functions);
34741 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34742 index 2e1e54e..1af0a0d 100644
34743 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34744 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34745 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34746 uint32_t evt_posted;
34747
34748 spin_lock_irqsave(&phba->hbalock, flags);
34749 - atomic_inc(&phba->num_rsrc_err);
34750 + atomic_inc_unchecked(&phba->num_rsrc_err);
34751 phba->last_rsrc_error_time = jiffies;
34752
34753 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34754 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34755 unsigned long flags;
34756 struct lpfc_hba *phba = vport->phba;
34757 uint32_t evt_posted;
34758 - atomic_inc(&phba->num_cmd_success);
34759 + atomic_inc_unchecked(&phba->num_cmd_success);
34760
34761 if (vport->cfg_lun_queue_depth <= queue_depth)
34762 return;
34763 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34764 unsigned long num_rsrc_err, num_cmd_success;
34765 int i;
34766
34767 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34768 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34769 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34770 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34771
34772 vports = lpfc_create_vport_work_array(phba);
34773 if (vports != NULL)
34774 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34775 }
34776 }
34777 lpfc_destroy_vport_work_array(phba, vports);
34778 - atomic_set(&phba->num_rsrc_err, 0);
34779 - atomic_set(&phba->num_cmd_success, 0);
34780 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34781 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34782 }
34783
34784 /**
34785 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34786 }
34787 }
34788 lpfc_destroy_vport_work_array(phba, vports);
34789 - atomic_set(&phba->num_rsrc_err, 0);
34790 - atomic_set(&phba->num_cmd_success, 0);
34791 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34792 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34793 }
34794
34795 /**
34796 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34797 index 5163edb..7b142bc 100644
34798 --- a/drivers/scsi/pmcraid.c
34799 +++ b/drivers/scsi/pmcraid.c
34800 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34801 res->scsi_dev = scsi_dev;
34802 scsi_dev->hostdata = res;
34803 res->change_detected = 0;
34804 - atomic_set(&res->read_failures, 0);
34805 - atomic_set(&res->write_failures, 0);
34806 + atomic_set_unchecked(&res->read_failures, 0);
34807 + atomic_set_unchecked(&res->write_failures, 0);
34808 rc = 0;
34809 }
34810 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34811 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34812
34813 /* If this was a SCSI read/write command keep count of errors */
34814 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34815 - atomic_inc(&res->read_failures);
34816 + atomic_inc_unchecked(&res->read_failures);
34817 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34818 - atomic_inc(&res->write_failures);
34819 + atomic_inc_unchecked(&res->write_failures);
34820
34821 if (!RES_IS_GSCSI(res->cfg_entry) &&
34822 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34823 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34824 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34825 * hrrq_id assigned here in queuecommand
34826 */
34827 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34828 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34829 pinstance->num_hrrq;
34830 cmd->cmd_done = pmcraid_io_done;
34831
34832 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34833 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34834 * hrrq_id assigned here in queuecommand
34835 */
34836 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34837 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34838 pinstance->num_hrrq;
34839
34840 if (request_size) {
34841 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34842
34843 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34844 /* add resources only after host is added into system */
34845 - if (!atomic_read(&pinstance->expose_resources))
34846 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34847 return;
34848
34849 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34850 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34851 init_waitqueue_head(&pinstance->reset_wait_q);
34852
34853 atomic_set(&pinstance->outstanding_cmds, 0);
34854 - atomic_set(&pinstance->last_message_id, 0);
34855 - atomic_set(&pinstance->expose_resources, 0);
34856 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34857 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34858
34859 INIT_LIST_HEAD(&pinstance->free_res_q);
34860 INIT_LIST_HEAD(&pinstance->used_res_q);
34861 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34862 /* Schedule worker thread to handle CCN and take care of adding and
34863 * removing devices to OS
34864 */
34865 - atomic_set(&pinstance->expose_resources, 1);
34866 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34867 schedule_work(&pinstance->worker_q);
34868 return rc;
34869
34870 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34871 index ca496c7..9c791d5 100644
34872 --- a/drivers/scsi/pmcraid.h
34873 +++ b/drivers/scsi/pmcraid.h
34874 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34875 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34876
34877 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34878 - atomic_t last_message_id;
34879 + atomic_unchecked_t last_message_id;
34880
34881 /* configuration table */
34882 struct pmcraid_config_table *cfg_table;
34883 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34884 atomic_t outstanding_cmds;
34885
34886 /* should add/delete resources to mid-layer now ?*/
34887 - atomic_t expose_resources;
34888 + atomic_unchecked_t expose_resources;
34889
34890
34891
34892 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34893 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34894 };
34895 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34896 - atomic_t read_failures; /* count of failed READ commands */
34897 - atomic_t write_failures; /* count of failed WRITE commands */
34898 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34899 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34900
34901 /* To indicate add/delete/modify during CCN */
34902 u8 change_detected;
34903 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34904 index fcf052c..a8025a4 100644
34905 --- a/drivers/scsi/qla2xxx/qla_def.h
34906 +++ b/drivers/scsi/qla2xxx/qla_def.h
34907 @@ -2244,7 +2244,7 @@ struct isp_operations {
34908 int (*get_flash_version) (struct scsi_qla_host *, void *);
34909 int (*start_scsi) (srb_t *);
34910 int (*abort_isp) (struct scsi_qla_host *);
34911 -};
34912 +} __no_const;
34913
34914 /* MSI-X Support *************************************************************/
34915
34916 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34917 index fd5edc6..4906148 100644
34918 --- a/drivers/scsi/qla4xxx/ql4_def.h
34919 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34920 @@ -258,7 +258,7 @@ struct ddb_entry {
34921 * (4000 only) */
34922 atomic_t relogin_timer; /* Max Time to wait for
34923 * relogin to complete */
34924 - atomic_t relogin_retry_count; /* Num of times relogin has been
34925 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34926 * retried */
34927 uint32_t default_time2wait; /* Default Min time between
34928 * relogins (+aens) */
34929 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34930 index 4169c8b..a8b896b 100644
34931 --- a/drivers/scsi/qla4xxx/ql4_os.c
34932 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34933 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34934 */
34935 if (!iscsi_is_session_online(cls_sess)) {
34936 /* Reset retry relogin timer */
34937 - atomic_inc(&ddb_entry->relogin_retry_count);
34938 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34939 DEBUG2(ql4_printk(KERN_INFO, ha,
34940 "%s: index[%d] relogin timed out-retrying"
34941 " relogin (%d), retry (%d)\n", __func__,
34942 ddb_entry->fw_ddb_index,
34943 - atomic_read(&ddb_entry->relogin_retry_count),
34944 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34945 ddb_entry->default_time2wait + 4));
34946 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34947 atomic_set(&ddb_entry->retry_relogin_timer,
34948 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34949
34950 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34951 atomic_set(&ddb_entry->relogin_timer, 0);
34952 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34953 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34954
34955 ddb_entry->default_relogin_timeout =
34956 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34957 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34958 index 2aeb2e9..46e3925 100644
34959 --- a/drivers/scsi/scsi.c
34960 +++ b/drivers/scsi/scsi.c
34961 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34962 unsigned long timeout;
34963 int rtn = 0;
34964
34965 - atomic_inc(&cmd->device->iorequest_cnt);
34966 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34967
34968 /* check if the device is still usable */
34969 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34970 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34971 index f85cfa6..a57c9e8 100644
34972 --- a/drivers/scsi/scsi_lib.c
34973 +++ b/drivers/scsi/scsi_lib.c
34974 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34975 shost = sdev->host;
34976 scsi_init_cmd_errh(cmd);
34977 cmd->result = DID_NO_CONNECT << 16;
34978 - atomic_inc(&cmd->device->iorequest_cnt);
34979 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34980
34981 /*
34982 * SCSI request completion path will do scsi_device_unbusy(),
34983 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34984
34985 INIT_LIST_HEAD(&cmd->eh_entry);
34986
34987 - atomic_inc(&cmd->device->iodone_cnt);
34988 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34989 if (cmd->result)
34990 - atomic_inc(&cmd->device->ioerr_cnt);
34991 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34992
34993 disposition = scsi_decide_disposition(cmd);
34994 if (disposition != SUCCESS &&
34995 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34996 index 04c2a27..9d8bd66 100644
34997 --- a/drivers/scsi/scsi_sysfs.c
34998 +++ b/drivers/scsi/scsi_sysfs.c
34999 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
35000 char *buf) \
35001 { \
35002 struct scsi_device *sdev = to_scsi_device(dev); \
35003 - unsigned long long count = atomic_read(&sdev->field); \
35004 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
35005 return snprintf(buf, 20, "0x%llx\n", count); \
35006 } \
35007 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
35008 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
35009 index 84a1fdf..693b0d6 100644
35010 --- a/drivers/scsi/scsi_tgt_lib.c
35011 +++ b/drivers/scsi/scsi_tgt_lib.c
35012 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
35013 int err;
35014
35015 dprintk("%lx %u\n", uaddr, len);
35016 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
35017 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
35018 if (err) {
35019 /*
35020 * TODO: need to fixup sg_tablesize, max_segment_size,
35021 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
35022 index 1b21491..1b7f60e 100644
35023 --- a/drivers/scsi/scsi_transport_fc.c
35024 +++ b/drivers/scsi/scsi_transport_fc.c
35025 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
35026 * Netlink Infrastructure
35027 */
35028
35029 -static atomic_t fc_event_seq;
35030 +static atomic_unchecked_t fc_event_seq;
35031
35032 /**
35033 * fc_get_event_number - Obtain the next sequential FC event number
35034 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
35035 u32
35036 fc_get_event_number(void)
35037 {
35038 - return atomic_add_return(1, &fc_event_seq);
35039 + return atomic_add_return_unchecked(1, &fc_event_seq);
35040 }
35041 EXPORT_SYMBOL(fc_get_event_number);
35042
35043 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
35044 {
35045 int error;
35046
35047 - atomic_set(&fc_event_seq, 0);
35048 + atomic_set_unchecked(&fc_event_seq, 0);
35049
35050 error = transport_class_register(&fc_host_class);
35051 if (error)
35052 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
35053 char *cp;
35054
35055 *val = simple_strtoul(buf, &cp, 0);
35056 - if ((*cp && (*cp != '\n')) || (*val < 0))
35057 + if (*cp && (*cp != '\n'))
35058 return -EINVAL;
35059 /*
35060 * Check for overflow; dev_loss_tmo is u32
35061 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
35062 index 96029e6..4d77fa0 100644
35063 --- a/drivers/scsi/scsi_transport_iscsi.c
35064 +++ b/drivers/scsi/scsi_transport_iscsi.c
35065 @@ -79,7 +79,7 @@ struct iscsi_internal {
35066 struct transport_container session_cont;
35067 };
35068
35069 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
35070 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
35071 static struct workqueue_struct *iscsi_eh_timer_workq;
35072
35073 static DEFINE_IDA(iscsi_sess_ida);
35074 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
35075 int err;
35076
35077 ihost = shost->shost_data;
35078 - session->sid = atomic_add_return(1, &iscsi_session_nr);
35079 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
35080
35081 if (target_id == ISCSI_MAX_TARGET) {
35082 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
35083 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
35084 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
35085 ISCSI_TRANSPORT_VERSION);
35086
35087 - atomic_set(&iscsi_session_nr, 0);
35088 + atomic_set_unchecked(&iscsi_session_nr, 0);
35089
35090 err = class_register(&iscsi_transport_class);
35091 if (err)
35092 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
35093 index 21a045e..ec89e03 100644
35094 --- a/drivers/scsi/scsi_transport_srp.c
35095 +++ b/drivers/scsi/scsi_transport_srp.c
35096 @@ -33,7 +33,7 @@
35097 #include "scsi_transport_srp_internal.h"
35098
35099 struct srp_host_attrs {
35100 - atomic_t next_port_id;
35101 + atomic_unchecked_t next_port_id;
35102 };
35103 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
35104
35105 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
35106 struct Scsi_Host *shost = dev_to_shost(dev);
35107 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
35108
35109 - atomic_set(&srp_host->next_port_id, 0);
35110 + atomic_set_unchecked(&srp_host->next_port_id, 0);
35111 return 0;
35112 }
35113
35114 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
35115 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
35116 rport->roles = ids->roles;
35117
35118 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
35119 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
35120 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
35121
35122 transport_setup_device(&rport->dev);
35123 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
35124 index 441a1c5..07cece7 100644
35125 --- a/drivers/scsi/sg.c
35126 +++ b/drivers/scsi/sg.c
35127 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
35128 sdp->disk->disk_name,
35129 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
35130 NULL,
35131 - (char *)arg);
35132 + (char __user *)arg);
35133 case BLKTRACESTART:
35134 return blk_trace_startstop(sdp->device->request_queue, 1);
35135 case BLKTRACESTOP:
35136 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
35137 const struct file_operations * fops;
35138 };
35139
35140 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
35141 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
35142 {"allow_dio", &adio_fops},
35143 {"debug", &debug_fops},
35144 {"def_reserved_size", &dressz_fops},
35145 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
35146 {
35147 int k, mask;
35148 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
35149 - struct sg_proc_leaf * leaf;
35150 + const struct sg_proc_leaf * leaf;
35151
35152 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
35153 if (!sg_proc_sgp)
35154 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
35155 index f64250e..1ee3049 100644
35156 --- a/drivers/spi/spi-dw-pci.c
35157 +++ b/drivers/spi/spi-dw-pci.c
35158 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
35159 #define spi_resume NULL
35160 #endif
35161
35162 -static const struct pci_device_id pci_ids[] __devinitdata = {
35163 +static const struct pci_device_id pci_ids[] __devinitconst = {
35164 /* Intel MID platform SPI controller 0 */
35165 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
35166 {},
35167 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
35168 index 77eae99..b7cdcc9 100644
35169 --- a/drivers/spi/spi.c
35170 +++ b/drivers/spi/spi.c
35171 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
35172 EXPORT_SYMBOL_GPL(spi_bus_unlock);
35173
35174 /* portable code must never pass more than 32 bytes */
35175 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35176 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
35177
35178 static u8 *buf;
35179
35180 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
35181 index 436fe97..4082570 100644
35182 --- a/drivers/staging/gma500/power.c
35183 +++ b/drivers/staging/gma500/power.c
35184 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
35185 ret = gma_resume_pci(dev->pdev);
35186 if (ret == 0) {
35187 /* FIXME: we want to defer this for Medfield/Oaktrail */
35188 - gma_resume_display(dev);
35189 + gma_resume_display(dev->pdev);
35190 psb_irq_preinstall(dev);
35191 psb_irq_postinstall(dev);
35192 pm_runtime_get(&dev->pdev->dev);
35193 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
35194 index bafccb3..e3ac78d 100644
35195 --- a/drivers/staging/hv/rndis_filter.c
35196 +++ b/drivers/staging/hv/rndis_filter.c
35197 @@ -42,7 +42,7 @@ struct rndis_device {
35198
35199 enum rndis_device_state state;
35200 bool link_state;
35201 - atomic_t new_req_id;
35202 + atomic_unchecked_t new_req_id;
35203
35204 spinlock_t request_lock;
35205 struct list_head req_list;
35206 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35207 * template
35208 */
35209 set = &rndis_msg->msg.set_req;
35210 - set->req_id = atomic_inc_return(&dev->new_req_id);
35211 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35212
35213 /* Add to the request list */
35214 spin_lock_irqsave(&dev->request_lock, flags);
35215 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35216
35217 /* Setup the rndis set */
35218 halt = &request->request_msg.msg.halt_req;
35219 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35220 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35221
35222 /* Ignore return since this msg is optional. */
35223 rndis_filter_send_request(dev, request);
35224 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35225 index 9e8f010..af9efb5 100644
35226 --- a/drivers/staging/iio/buffer_generic.h
35227 +++ b/drivers/staging/iio/buffer_generic.h
35228 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35229
35230 int (*is_enabled)(struct iio_buffer *buffer);
35231 int (*enable)(struct iio_buffer *buffer);
35232 -};
35233 +} __no_const;
35234
35235 /**
35236 * struct iio_buffer_setup_ops - buffer setup related callbacks
35237 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35238 index 8b307b4..a97ac91 100644
35239 --- a/drivers/staging/octeon/ethernet-rx.c
35240 +++ b/drivers/staging/octeon/ethernet-rx.c
35241 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35242 /* Increment RX stats for virtual ports */
35243 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35244 #ifdef CONFIG_64BIT
35245 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35246 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35247 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35248 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35249 #else
35250 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35251 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35252 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35253 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35254 #endif
35255 }
35256 netif_receive_skb(skb);
35257 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35258 dev->name);
35259 */
35260 #ifdef CONFIG_64BIT
35261 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35262 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35263 #else
35264 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35265 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35266 #endif
35267 dev_kfree_skb_irq(skb);
35268 }
35269 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35270 index 076f866..2308070 100644
35271 --- a/drivers/staging/octeon/ethernet.c
35272 +++ b/drivers/staging/octeon/ethernet.c
35273 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35274 * since the RX tasklet also increments it.
35275 */
35276 #ifdef CONFIG_64BIT
35277 - atomic64_add(rx_status.dropped_packets,
35278 - (atomic64_t *)&priv->stats.rx_dropped);
35279 + atomic64_add_unchecked(rx_status.dropped_packets,
35280 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35281 #else
35282 - atomic_add(rx_status.dropped_packets,
35283 - (atomic_t *)&priv->stats.rx_dropped);
35284 + atomic_add_unchecked(rx_status.dropped_packets,
35285 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35286 #endif
35287 }
35288
35289 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35290 index 7a19555..466456d 100644
35291 --- a/drivers/staging/pohmelfs/inode.c
35292 +++ b/drivers/staging/pohmelfs/inode.c
35293 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35294 mutex_init(&psb->mcache_lock);
35295 psb->mcache_root = RB_ROOT;
35296 psb->mcache_timeout = msecs_to_jiffies(5000);
35297 - atomic_long_set(&psb->mcache_gen, 0);
35298 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35299
35300 psb->trans_max_pages = 100;
35301
35302 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35303 INIT_LIST_HEAD(&psb->crypto_ready_list);
35304 INIT_LIST_HEAD(&psb->crypto_active_list);
35305
35306 - atomic_set(&psb->trans_gen, 1);
35307 + atomic_set_unchecked(&psb->trans_gen, 1);
35308 atomic_long_set(&psb->total_inodes, 0);
35309
35310 mutex_init(&psb->state_lock);
35311 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35312 index e22665c..a2a9390 100644
35313 --- a/drivers/staging/pohmelfs/mcache.c
35314 +++ b/drivers/staging/pohmelfs/mcache.c
35315 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35316 m->data = data;
35317 m->start = start;
35318 m->size = size;
35319 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35320 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35321
35322 mutex_lock(&psb->mcache_lock);
35323 err = pohmelfs_mcache_insert(psb, m);
35324 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35325 index 985b6b7..7699e05 100644
35326 --- a/drivers/staging/pohmelfs/netfs.h
35327 +++ b/drivers/staging/pohmelfs/netfs.h
35328 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35329 struct pohmelfs_sb {
35330 struct rb_root mcache_root;
35331 struct mutex mcache_lock;
35332 - atomic_long_t mcache_gen;
35333 + atomic_long_unchecked_t mcache_gen;
35334 unsigned long mcache_timeout;
35335
35336 unsigned int idx;
35337
35338 unsigned int trans_retries;
35339
35340 - atomic_t trans_gen;
35341 + atomic_unchecked_t trans_gen;
35342
35343 unsigned int crypto_attached_size;
35344 unsigned int crypto_align_size;
35345 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35346 index 06c1a74..866eebc 100644
35347 --- a/drivers/staging/pohmelfs/trans.c
35348 +++ b/drivers/staging/pohmelfs/trans.c
35349 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35350 int err;
35351 struct netfs_cmd *cmd = t->iovec.iov_base;
35352
35353 - t->gen = atomic_inc_return(&psb->trans_gen);
35354 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35355
35356 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35357 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35358 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35359 index 86308a0..feaa925 100644
35360 --- a/drivers/staging/rtl8712/rtl871x_io.h
35361 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35362 @@ -108,7 +108,7 @@ struct _io_ops {
35363 u8 *pmem);
35364 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35365 u8 *pmem);
35366 -};
35367 +} __no_const;
35368
35369 struct io_req {
35370 struct list_head list;
35371 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35372 index c7b5e8b..783d6cb 100644
35373 --- a/drivers/staging/sbe-2t3e3/netdev.c
35374 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35375 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35376 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35377
35378 if (rlen)
35379 - if (copy_to_user(data, &resp, rlen))
35380 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35381 return -EFAULT;
35382
35383 return 0;
35384 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35385 index be21617..0954e45 100644
35386 --- a/drivers/staging/usbip/usbip_common.h
35387 +++ b/drivers/staging/usbip/usbip_common.h
35388 @@ -289,7 +289,7 @@ struct usbip_device {
35389 void (*shutdown)(struct usbip_device *);
35390 void (*reset)(struct usbip_device *);
35391 void (*unusable)(struct usbip_device *);
35392 - } eh_ops;
35393 + } __no_const eh_ops;
35394 };
35395
35396 #if 0
35397 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35398 index 88b3298..3783eee 100644
35399 --- a/drivers/staging/usbip/vhci.h
35400 +++ b/drivers/staging/usbip/vhci.h
35401 @@ -88,7 +88,7 @@ struct vhci_hcd {
35402 unsigned resuming:1;
35403 unsigned long re_timeout;
35404
35405 - atomic_t seqnum;
35406 + atomic_unchecked_t seqnum;
35407
35408 /*
35409 * NOTE:
35410 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35411 index 2ee97e2..0420b86 100644
35412 --- a/drivers/staging/usbip/vhci_hcd.c
35413 +++ b/drivers/staging/usbip/vhci_hcd.c
35414 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35415 return;
35416 }
35417
35418 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35419 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35420 if (priv->seqnum == 0xffff)
35421 dev_info(&urb->dev->dev, "seqnum max\n");
35422
35423 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35424 return -ENOMEM;
35425 }
35426
35427 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35428 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35429 if (unlink->seqnum == 0xffff)
35430 pr_info("seqnum max\n");
35431
35432 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35433 vdev->rhport = rhport;
35434 }
35435
35436 - atomic_set(&vhci->seqnum, 0);
35437 + atomic_set_unchecked(&vhci->seqnum, 0);
35438 spin_lock_init(&vhci->lock);
35439
35440 hcd->power_budget = 0; /* no limit */
35441 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35442 index 3872b8c..fe6d2f4 100644
35443 --- a/drivers/staging/usbip/vhci_rx.c
35444 +++ b/drivers/staging/usbip/vhci_rx.c
35445 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35446 if (!urb) {
35447 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35448 pr_info("max seqnum %d\n",
35449 - atomic_read(&the_controller->seqnum));
35450 + atomic_read_unchecked(&the_controller->seqnum));
35451 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35452 return;
35453 }
35454 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35455 index 7735027..30eed13 100644
35456 --- a/drivers/staging/vt6655/hostap.c
35457 +++ b/drivers/staging/vt6655/hostap.c
35458 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35459 *
35460 */
35461
35462 +static net_device_ops_no_const apdev_netdev_ops;
35463 +
35464 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35465 {
35466 PSDevice apdev_priv;
35467 struct net_device *dev = pDevice->dev;
35468 int ret;
35469 - const struct net_device_ops apdev_netdev_ops = {
35470 - .ndo_start_xmit = pDevice->tx_80211,
35471 - };
35472
35473 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35474
35475 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35476 *apdev_priv = *pDevice;
35477 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35478
35479 + /* only half broken now */
35480 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35481 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35482
35483 pDevice->apdev->type = ARPHRD_IEEE80211;
35484 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35485 index 51b5adf..098e320 100644
35486 --- a/drivers/staging/vt6656/hostap.c
35487 +++ b/drivers/staging/vt6656/hostap.c
35488 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35489 *
35490 */
35491
35492 +static net_device_ops_no_const apdev_netdev_ops;
35493 +
35494 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35495 {
35496 PSDevice apdev_priv;
35497 struct net_device *dev = pDevice->dev;
35498 int ret;
35499 - const struct net_device_ops apdev_netdev_ops = {
35500 - .ndo_start_xmit = pDevice->tx_80211,
35501 - };
35502
35503 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35504
35505 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35506 *apdev_priv = *pDevice;
35507 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35508
35509 + /* only half broken now */
35510 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35511 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35512
35513 pDevice->apdev->type = ARPHRD_IEEE80211;
35514 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35515 index 7843dfd..3db105f 100644
35516 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35517 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35518 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35519
35520 struct usbctlx_completor {
35521 int (*complete) (struct usbctlx_completor *);
35522 -};
35523 +} __no_const;
35524
35525 static int
35526 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35527 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35528 index 1ca66ea..76f1343 100644
35529 --- a/drivers/staging/zcache/tmem.c
35530 +++ b/drivers/staging/zcache/tmem.c
35531 @@ -39,7 +39,7 @@
35532 * A tmem host implementation must use this function to register callbacks
35533 * for memory allocation.
35534 */
35535 -static struct tmem_hostops tmem_hostops;
35536 +static tmem_hostops_no_const tmem_hostops;
35537
35538 static void tmem_objnode_tree_init(void);
35539
35540 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35541 * A tmem host implementation must use this function to register
35542 * callbacks for a page-accessible memory (PAM) implementation
35543 */
35544 -static struct tmem_pamops tmem_pamops;
35545 +static tmem_pamops_no_const tmem_pamops;
35546
35547 void tmem_register_pamops(struct tmem_pamops *m)
35548 {
35549 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35550 index ed147c4..94fc3c6 100644
35551 --- a/drivers/staging/zcache/tmem.h
35552 +++ b/drivers/staging/zcache/tmem.h
35553 @@ -180,6 +180,7 @@ struct tmem_pamops {
35554 void (*new_obj)(struct tmem_obj *);
35555 int (*replace_in_obj)(void *, struct tmem_obj *);
35556 };
35557 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35558 extern void tmem_register_pamops(struct tmem_pamops *m);
35559
35560 /* memory allocation methods provided by the host implementation */
35561 @@ -189,6 +190,7 @@ struct tmem_hostops {
35562 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35563 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35564 };
35565 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35566 extern void tmem_register_hostops(struct tmem_hostops *m);
35567
35568 /* core tmem accessor functions */
35569 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35570 index 0c1d5c73..88e90a8 100644
35571 --- a/drivers/target/iscsi/iscsi_target.c
35572 +++ b/drivers/target/iscsi/iscsi_target.c
35573 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35574 * outstanding_r2ts reaches zero, go ahead and send the delayed
35575 * TASK_ABORTED status.
35576 */
35577 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35578 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35579 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35580 if (--cmd->outstanding_r2ts < 1) {
35581 iscsit_stop_dataout_timer(cmd);
35582 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35583 index 6845228..df77141 100644
35584 --- a/drivers/target/target_core_tmr.c
35585 +++ b/drivers/target/target_core_tmr.c
35586 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35587 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35588 cmd->t_task_list_num,
35589 atomic_read(&cmd->t_task_cdbs_left),
35590 - atomic_read(&cmd->t_task_cdbs_sent),
35591 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35592 atomic_read(&cmd->t_transport_active),
35593 atomic_read(&cmd->t_transport_stop),
35594 atomic_read(&cmd->t_transport_sent));
35595 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35596 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35597 " task: %p, t_fe_count: %d dev: %p\n", task,
35598 fe_count, dev);
35599 - atomic_set(&cmd->t_transport_aborted, 1);
35600 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35601 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35602
35603 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35604 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35605 }
35606 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35607 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35608 - atomic_set(&cmd->t_transport_aborted, 1);
35609 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35610 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35611
35612 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35613 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35614 index 861628e..659ae80 100644
35615 --- a/drivers/target/target_core_transport.c
35616 +++ b/drivers/target/target_core_transport.c
35617 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35618
35619 dev->queue_depth = dev_limits->queue_depth;
35620 atomic_set(&dev->depth_left, dev->queue_depth);
35621 - atomic_set(&dev->dev_ordered_id, 0);
35622 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35623
35624 se_dev_set_default_attribs(dev, dev_limits);
35625
35626 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35627 * Used to determine when ORDERED commands should go from
35628 * Dormant to Active status.
35629 */
35630 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35631 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35632 smp_mb__after_atomic_inc();
35633 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35634 cmd->se_ordered_id, cmd->sam_task_attr,
35635 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35636 " t_transport_active: %d t_transport_stop: %d"
35637 " t_transport_sent: %d\n", cmd->t_task_list_num,
35638 atomic_read(&cmd->t_task_cdbs_left),
35639 - atomic_read(&cmd->t_task_cdbs_sent),
35640 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35641 atomic_read(&cmd->t_task_cdbs_ex_left),
35642 atomic_read(&cmd->t_transport_active),
35643 atomic_read(&cmd->t_transport_stop),
35644 @@ -2089,9 +2089,9 @@ check_depth:
35645
35646 spin_lock_irqsave(&cmd->t_state_lock, flags);
35647 task->task_flags |= (TF_ACTIVE | TF_SENT);
35648 - atomic_inc(&cmd->t_task_cdbs_sent);
35649 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35650
35651 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35652 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35653 cmd->t_task_list_num)
35654 atomic_set(&cmd->t_transport_sent, 1);
35655
35656 @@ -4273,7 +4273,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35657 atomic_set(&cmd->transport_lun_stop, 0);
35658 }
35659 if (!atomic_read(&cmd->t_transport_active) ||
35660 - atomic_read(&cmd->t_transport_aborted)) {
35661 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35662 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35663 return false;
35664 }
35665 @@ -4522,7 +4522,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35666 {
35667 int ret = 0;
35668
35669 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35670 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35671 if (!send_status ||
35672 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35673 return 1;
35674 @@ -4559,7 +4559,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35675 */
35676 if (cmd->data_direction == DMA_TO_DEVICE) {
35677 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35678 - atomic_inc(&cmd->t_transport_aborted);
35679 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35680 smp_mb__after_atomic_inc();
35681 }
35682 }
35683 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35684 index b9040be..e3f5aab 100644
35685 --- a/drivers/tty/hvc/hvcs.c
35686 +++ b/drivers/tty/hvc/hvcs.c
35687 @@ -83,6 +83,7 @@
35688 #include <asm/hvcserver.h>
35689 #include <asm/uaccess.h>
35690 #include <asm/vio.h>
35691 +#include <asm/local.h>
35692
35693 /*
35694 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35695 @@ -270,7 +271,7 @@ struct hvcs_struct {
35696 unsigned int index;
35697
35698 struct tty_struct *tty;
35699 - int open_count;
35700 + local_t open_count;
35701
35702 /*
35703 * Used to tell the driver kernel_thread what operations need to take
35704 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35705
35706 spin_lock_irqsave(&hvcsd->lock, flags);
35707
35708 - if (hvcsd->open_count > 0) {
35709 + if (local_read(&hvcsd->open_count) > 0) {
35710 spin_unlock_irqrestore(&hvcsd->lock, flags);
35711 printk(KERN_INFO "HVCS: vterm state unchanged. "
35712 "The hvcs device node is still in use.\n");
35713 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35714 if ((retval = hvcs_partner_connect(hvcsd)))
35715 goto error_release;
35716
35717 - hvcsd->open_count = 1;
35718 + local_set(&hvcsd->open_count, 1);
35719 hvcsd->tty = tty;
35720 tty->driver_data = hvcsd;
35721
35722 @@ -1179,7 +1180,7 @@ fast_open:
35723
35724 spin_lock_irqsave(&hvcsd->lock, flags);
35725 kref_get(&hvcsd->kref);
35726 - hvcsd->open_count++;
35727 + local_inc(&hvcsd->open_count);
35728 hvcsd->todo_mask |= HVCS_SCHED_READ;
35729 spin_unlock_irqrestore(&hvcsd->lock, flags);
35730
35731 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35732 hvcsd = tty->driver_data;
35733
35734 spin_lock_irqsave(&hvcsd->lock, flags);
35735 - if (--hvcsd->open_count == 0) {
35736 + if (local_dec_and_test(&hvcsd->open_count)) {
35737
35738 vio_disable_interrupts(hvcsd->vdev);
35739
35740 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35741 free_irq(irq, hvcsd);
35742 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35743 return;
35744 - } else if (hvcsd->open_count < 0) {
35745 + } else if (local_read(&hvcsd->open_count) < 0) {
35746 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35747 " is missmanaged.\n",
35748 - hvcsd->vdev->unit_address, hvcsd->open_count);
35749 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35750 }
35751
35752 spin_unlock_irqrestore(&hvcsd->lock, flags);
35753 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35754
35755 spin_lock_irqsave(&hvcsd->lock, flags);
35756 /* Preserve this so that we know how many kref refs to put */
35757 - temp_open_count = hvcsd->open_count;
35758 + temp_open_count = local_read(&hvcsd->open_count);
35759
35760 /*
35761 * Don't kref put inside the spinlock because the destruction
35762 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35763 hvcsd->tty->driver_data = NULL;
35764 hvcsd->tty = NULL;
35765
35766 - hvcsd->open_count = 0;
35767 + local_set(&hvcsd->open_count, 0);
35768
35769 /* This will drop any buffered data on the floor which is OK in a hangup
35770 * scenario. */
35771 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35772 * the middle of a write operation? This is a crummy place to do this
35773 * but we want to keep it all in the spinlock.
35774 */
35775 - if (hvcsd->open_count <= 0) {
35776 + if (local_read(&hvcsd->open_count) <= 0) {
35777 spin_unlock_irqrestore(&hvcsd->lock, flags);
35778 return -ENODEV;
35779 }
35780 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35781 {
35782 struct hvcs_struct *hvcsd = tty->driver_data;
35783
35784 - if (!hvcsd || hvcsd->open_count <= 0)
35785 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35786 return 0;
35787
35788 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35789 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35790 index ef92869..f4ebd88 100644
35791 --- a/drivers/tty/ipwireless/tty.c
35792 +++ b/drivers/tty/ipwireless/tty.c
35793 @@ -29,6 +29,7 @@
35794 #include <linux/tty_driver.h>
35795 #include <linux/tty_flip.h>
35796 #include <linux/uaccess.h>
35797 +#include <asm/local.h>
35798
35799 #include "tty.h"
35800 #include "network.h"
35801 @@ -51,7 +52,7 @@ struct ipw_tty {
35802 int tty_type;
35803 struct ipw_network *network;
35804 struct tty_struct *linux_tty;
35805 - int open_count;
35806 + local_t open_count;
35807 unsigned int control_lines;
35808 struct mutex ipw_tty_mutex;
35809 int tx_bytes_queued;
35810 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35811 mutex_unlock(&tty->ipw_tty_mutex);
35812 return -ENODEV;
35813 }
35814 - if (tty->open_count == 0)
35815 + if (local_read(&tty->open_count) == 0)
35816 tty->tx_bytes_queued = 0;
35817
35818 - tty->open_count++;
35819 + local_inc(&tty->open_count);
35820
35821 tty->linux_tty = linux_tty;
35822 linux_tty->driver_data = tty;
35823 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35824
35825 static void do_ipw_close(struct ipw_tty *tty)
35826 {
35827 - tty->open_count--;
35828 -
35829 - if (tty->open_count == 0) {
35830 + if (local_dec_return(&tty->open_count) == 0) {
35831 struct tty_struct *linux_tty = tty->linux_tty;
35832
35833 if (linux_tty != NULL) {
35834 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35835 return;
35836
35837 mutex_lock(&tty->ipw_tty_mutex);
35838 - if (tty->open_count == 0) {
35839 + if (local_read(&tty->open_count) == 0) {
35840 mutex_unlock(&tty->ipw_tty_mutex);
35841 return;
35842 }
35843 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35844 return;
35845 }
35846
35847 - if (!tty->open_count) {
35848 + if (!local_read(&tty->open_count)) {
35849 mutex_unlock(&tty->ipw_tty_mutex);
35850 return;
35851 }
35852 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35853 return -ENODEV;
35854
35855 mutex_lock(&tty->ipw_tty_mutex);
35856 - if (!tty->open_count) {
35857 + if (!local_read(&tty->open_count)) {
35858 mutex_unlock(&tty->ipw_tty_mutex);
35859 return -EINVAL;
35860 }
35861 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35862 if (!tty)
35863 return -ENODEV;
35864
35865 - if (!tty->open_count)
35866 + if (!local_read(&tty->open_count))
35867 return -EINVAL;
35868
35869 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35870 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35871 if (!tty)
35872 return 0;
35873
35874 - if (!tty->open_count)
35875 + if (!local_read(&tty->open_count))
35876 return 0;
35877
35878 return tty->tx_bytes_queued;
35879 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35880 if (!tty)
35881 return -ENODEV;
35882
35883 - if (!tty->open_count)
35884 + if (!local_read(&tty->open_count))
35885 return -EINVAL;
35886
35887 return get_control_lines(tty);
35888 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35889 if (!tty)
35890 return -ENODEV;
35891
35892 - if (!tty->open_count)
35893 + if (!local_read(&tty->open_count))
35894 return -EINVAL;
35895
35896 return set_control_lines(tty, set, clear);
35897 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35898 if (!tty)
35899 return -ENODEV;
35900
35901 - if (!tty->open_count)
35902 + if (!local_read(&tty->open_count))
35903 return -EINVAL;
35904
35905 /* FIXME: Exactly how is the tty object locked here .. */
35906 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35907 against a parallel ioctl etc */
35908 mutex_lock(&ttyj->ipw_tty_mutex);
35909 }
35910 - while (ttyj->open_count)
35911 + while (local_read(&ttyj->open_count))
35912 do_ipw_close(ttyj);
35913 ipwireless_disassociate_network_ttys(network,
35914 ttyj->channel_idx);
35915 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35916 index fc7bbba..9527e93 100644
35917 --- a/drivers/tty/n_gsm.c
35918 +++ b/drivers/tty/n_gsm.c
35919 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35920 kref_init(&dlci->ref);
35921 mutex_init(&dlci->mutex);
35922 dlci->fifo = &dlci->_fifo;
35923 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35924 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35925 kfree(dlci);
35926 return NULL;
35927 }
35928 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35929 index 39d6ab6..eb97f41 100644
35930 --- a/drivers/tty/n_tty.c
35931 +++ b/drivers/tty/n_tty.c
35932 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35933 {
35934 *ops = tty_ldisc_N_TTY;
35935 ops->owner = NULL;
35936 - ops->refcount = ops->flags = 0;
35937 + atomic_set(&ops->refcount, 0);
35938 + ops->flags = 0;
35939 }
35940 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35941 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35942 index e18604b..a7d5a11 100644
35943 --- a/drivers/tty/pty.c
35944 +++ b/drivers/tty/pty.c
35945 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35946 register_sysctl_table(pty_root_table);
35947
35948 /* Now create the /dev/ptmx special device */
35949 + pax_open_kernel();
35950 tty_default_fops(&ptmx_fops);
35951 - ptmx_fops.open = ptmx_open;
35952 + *(void **)&ptmx_fops.open = ptmx_open;
35953 + pax_close_kernel();
35954
35955 cdev_init(&ptmx_cdev, &ptmx_fops);
35956 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35957 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35958 index 2b42a01..32a2ed3 100644
35959 --- a/drivers/tty/serial/kgdboc.c
35960 +++ b/drivers/tty/serial/kgdboc.c
35961 @@ -24,8 +24,9 @@
35962 #define MAX_CONFIG_LEN 40
35963
35964 static struct kgdb_io kgdboc_io_ops;
35965 +static struct kgdb_io kgdboc_io_ops_console;
35966
35967 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35968 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35969 static int configured = -1;
35970
35971 static char config[MAX_CONFIG_LEN];
35972 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35973 kgdboc_unregister_kbd();
35974 if (configured == 1)
35975 kgdb_unregister_io_module(&kgdboc_io_ops);
35976 + else if (configured == 2)
35977 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35978 }
35979
35980 static int configure_kgdboc(void)
35981 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35982 int err;
35983 char *cptr = config;
35984 struct console *cons;
35985 + int is_console = 0;
35986
35987 err = kgdboc_option_setup(config);
35988 if (err || !strlen(config) || isspace(config[0]))
35989 goto noconfig;
35990
35991 err = -ENODEV;
35992 - kgdboc_io_ops.is_console = 0;
35993 kgdb_tty_driver = NULL;
35994
35995 kgdboc_use_kms = 0;
35996 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35997 int idx;
35998 if (cons->device && cons->device(cons, &idx) == p &&
35999 idx == tty_line) {
36000 - kgdboc_io_ops.is_console = 1;
36001 + is_console = 1;
36002 break;
36003 }
36004 cons = cons->next;
36005 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
36006 kgdb_tty_line = tty_line;
36007
36008 do_register:
36009 - err = kgdb_register_io_module(&kgdboc_io_ops);
36010 + if (is_console) {
36011 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
36012 + configured = 2;
36013 + } else {
36014 + err = kgdb_register_io_module(&kgdboc_io_ops);
36015 + configured = 1;
36016 + }
36017 if (err)
36018 goto noconfig;
36019
36020 - configured = 1;
36021 -
36022 return 0;
36023
36024 noconfig:
36025 @@ -213,7 +220,7 @@ noconfig:
36026 static int __init init_kgdboc(void)
36027 {
36028 /* Already configured? */
36029 - if (configured == 1)
36030 + if (configured >= 1)
36031 return 0;
36032
36033 return configure_kgdboc();
36034 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
36035 if (config[len - 1] == '\n')
36036 config[len - 1] = '\0';
36037
36038 - if (configured == 1)
36039 + if (configured >= 1)
36040 cleanup_kgdboc();
36041
36042 /* Go and configure with the new params. */
36043 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
36044 .post_exception = kgdboc_post_exp_handler,
36045 };
36046
36047 +static struct kgdb_io kgdboc_io_ops_console = {
36048 + .name = "kgdboc",
36049 + .read_char = kgdboc_get_char,
36050 + .write_char = kgdboc_put_char,
36051 + .pre_exception = kgdboc_pre_exp_handler,
36052 + .post_exception = kgdboc_post_exp_handler,
36053 + .is_console = 1
36054 +};
36055 +
36056 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
36057 /* This is only available if kgdboc is a built in for early debugging */
36058 static int __init kgdboc_early_init(char *opt)
36059 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
36060 index 05085be..67eadb0 100644
36061 --- a/drivers/tty/tty_io.c
36062 +++ b/drivers/tty/tty_io.c
36063 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
36064
36065 void tty_default_fops(struct file_operations *fops)
36066 {
36067 - *fops = tty_fops;
36068 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
36069 }
36070
36071 /*
36072 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
36073 index 8e0924f..4204eb4 100644
36074 --- a/drivers/tty/tty_ldisc.c
36075 +++ b/drivers/tty/tty_ldisc.c
36076 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
36077 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
36078 struct tty_ldisc_ops *ldo = ld->ops;
36079
36080 - ldo->refcount--;
36081 + atomic_dec(&ldo->refcount);
36082 module_put(ldo->owner);
36083 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36084
36085 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
36086 spin_lock_irqsave(&tty_ldisc_lock, flags);
36087 tty_ldiscs[disc] = new_ldisc;
36088 new_ldisc->num = disc;
36089 - new_ldisc->refcount = 0;
36090 + atomic_set(&new_ldisc->refcount, 0);
36091 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36092
36093 return ret;
36094 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
36095 return -EINVAL;
36096
36097 spin_lock_irqsave(&tty_ldisc_lock, flags);
36098 - if (tty_ldiscs[disc]->refcount)
36099 + if (atomic_read(&tty_ldiscs[disc]->refcount))
36100 ret = -EBUSY;
36101 else
36102 tty_ldiscs[disc] = NULL;
36103 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
36104 if (ldops) {
36105 ret = ERR_PTR(-EAGAIN);
36106 if (try_module_get(ldops->owner)) {
36107 - ldops->refcount++;
36108 + atomic_inc(&ldops->refcount);
36109 ret = ldops;
36110 }
36111 }
36112 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
36113 unsigned long flags;
36114
36115 spin_lock_irqsave(&tty_ldisc_lock, flags);
36116 - ldops->refcount--;
36117 + atomic_dec(&ldops->refcount);
36118 module_put(ldops->owner);
36119 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36120 }
36121 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
36122 index a605549..6bd3c96 100644
36123 --- a/drivers/tty/vt/keyboard.c
36124 +++ b/drivers/tty/vt/keyboard.c
36125 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
36126 kbd->kbdmode == VC_OFF) &&
36127 value != KVAL(K_SAK))
36128 return; /* SAK is allowed even in raw mode */
36129 +
36130 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36131 + {
36132 + void *func = fn_handler[value];
36133 + if (func == fn_show_state || func == fn_show_ptregs ||
36134 + func == fn_show_mem)
36135 + return;
36136 + }
36137 +#endif
36138 +
36139 fn_handler[value](vc);
36140 }
36141
36142 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
36143 index 65447c5..0526f0a 100644
36144 --- a/drivers/tty/vt/vt_ioctl.c
36145 +++ b/drivers/tty/vt/vt_ioctl.c
36146 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36147 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36148 return -EFAULT;
36149
36150 - if (!capable(CAP_SYS_TTY_CONFIG))
36151 - perm = 0;
36152 -
36153 switch (cmd) {
36154 case KDGKBENT:
36155 key_map = key_maps[s];
36156 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36157 val = (i ? K_HOLE : K_NOSUCHMAP);
36158 return put_user(val, &user_kbe->kb_value);
36159 case KDSKBENT:
36160 + if (!capable(CAP_SYS_TTY_CONFIG))
36161 + perm = 0;
36162 +
36163 if (!perm)
36164 return -EPERM;
36165 if (!i && v == K_NOSUCHMAP) {
36166 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36167 int i, j, k;
36168 int ret;
36169
36170 - if (!capable(CAP_SYS_TTY_CONFIG))
36171 - perm = 0;
36172 -
36173 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36174 if (!kbs) {
36175 ret = -ENOMEM;
36176 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36177 kfree(kbs);
36178 return ((p && *p) ? -EOVERFLOW : 0);
36179 case KDSKBSENT:
36180 + if (!capable(CAP_SYS_TTY_CONFIG))
36181 + perm = 0;
36182 +
36183 if (!perm) {
36184 ret = -EPERM;
36185 goto reterr;
36186 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
36187 index a783d53..cb30d94 100644
36188 --- a/drivers/uio/uio.c
36189 +++ b/drivers/uio/uio.c
36190 @@ -25,6 +25,7 @@
36191 #include <linux/kobject.h>
36192 #include <linux/cdev.h>
36193 #include <linux/uio_driver.h>
36194 +#include <asm/local.h>
36195
36196 #define UIO_MAX_DEVICES (1U << MINORBITS)
36197
36198 @@ -32,10 +33,10 @@ struct uio_device {
36199 struct module *owner;
36200 struct device *dev;
36201 int minor;
36202 - atomic_t event;
36203 + atomic_unchecked_t event;
36204 struct fasync_struct *async_queue;
36205 wait_queue_head_t wait;
36206 - int vma_count;
36207 + local_t vma_count;
36208 struct uio_info *info;
36209 struct kobject *map_dir;
36210 struct kobject *portio_dir;
36211 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36212 struct device_attribute *attr, char *buf)
36213 {
36214 struct uio_device *idev = dev_get_drvdata(dev);
36215 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36216 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36217 }
36218
36219 static struct device_attribute uio_class_attributes[] = {
36220 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36221 {
36222 struct uio_device *idev = info->uio_dev;
36223
36224 - atomic_inc(&idev->event);
36225 + atomic_inc_unchecked(&idev->event);
36226 wake_up_interruptible(&idev->wait);
36227 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36228 }
36229 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36230 }
36231
36232 listener->dev = idev;
36233 - listener->event_count = atomic_read(&idev->event);
36234 + listener->event_count = atomic_read_unchecked(&idev->event);
36235 filep->private_data = listener;
36236
36237 if (idev->info->open) {
36238 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36239 return -EIO;
36240
36241 poll_wait(filep, &idev->wait, wait);
36242 - if (listener->event_count != atomic_read(&idev->event))
36243 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36244 return POLLIN | POLLRDNORM;
36245 return 0;
36246 }
36247 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36248 do {
36249 set_current_state(TASK_INTERRUPTIBLE);
36250
36251 - event_count = atomic_read(&idev->event);
36252 + event_count = atomic_read_unchecked(&idev->event);
36253 if (event_count != listener->event_count) {
36254 if (copy_to_user(buf, &event_count, count))
36255 retval = -EFAULT;
36256 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36257 static void uio_vma_open(struct vm_area_struct *vma)
36258 {
36259 struct uio_device *idev = vma->vm_private_data;
36260 - idev->vma_count++;
36261 + local_inc(&idev->vma_count);
36262 }
36263
36264 static void uio_vma_close(struct vm_area_struct *vma)
36265 {
36266 struct uio_device *idev = vma->vm_private_data;
36267 - idev->vma_count--;
36268 + local_dec(&idev->vma_count);
36269 }
36270
36271 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36272 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36273 idev->owner = owner;
36274 idev->info = info;
36275 init_waitqueue_head(&idev->wait);
36276 - atomic_set(&idev->event, 0);
36277 + atomic_set_unchecked(&idev->event, 0);
36278
36279 ret = uio_get_minor(idev);
36280 if (ret)
36281 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36282 index a845f8b..4f54072 100644
36283 --- a/drivers/usb/atm/cxacru.c
36284 +++ b/drivers/usb/atm/cxacru.c
36285 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36286 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36287 if (ret < 2)
36288 return -EINVAL;
36289 - if (index < 0 || index > 0x7f)
36290 + if (index > 0x7f)
36291 return -EINVAL;
36292 pos += tmp;
36293
36294 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36295 index d3448ca..d2864ca 100644
36296 --- a/drivers/usb/atm/usbatm.c
36297 +++ b/drivers/usb/atm/usbatm.c
36298 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36299 if (printk_ratelimit())
36300 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36301 __func__, vpi, vci);
36302 - atomic_inc(&vcc->stats->rx_err);
36303 + atomic_inc_unchecked(&vcc->stats->rx_err);
36304 return;
36305 }
36306
36307 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36308 if (length > ATM_MAX_AAL5_PDU) {
36309 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36310 __func__, length, vcc);
36311 - atomic_inc(&vcc->stats->rx_err);
36312 + atomic_inc_unchecked(&vcc->stats->rx_err);
36313 goto out;
36314 }
36315
36316 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36317 if (sarb->len < pdu_length) {
36318 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36319 __func__, pdu_length, sarb->len, vcc);
36320 - atomic_inc(&vcc->stats->rx_err);
36321 + atomic_inc_unchecked(&vcc->stats->rx_err);
36322 goto out;
36323 }
36324
36325 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36326 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36327 __func__, vcc);
36328 - atomic_inc(&vcc->stats->rx_err);
36329 + atomic_inc_unchecked(&vcc->stats->rx_err);
36330 goto out;
36331 }
36332
36333 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36334 if (printk_ratelimit())
36335 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36336 __func__, length);
36337 - atomic_inc(&vcc->stats->rx_drop);
36338 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36339 goto out;
36340 }
36341
36342 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36343
36344 vcc->push(vcc, skb);
36345
36346 - atomic_inc(&vcc->stats->rx);
36347 + atomic_inc_unchecked(&vcc->stats->rx);
36348 out:
36349 skb_trim(sarb, 0);
36350 }
36351 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36352 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36353
36354 usbatm_pop(vcc, skb);
36355 - atomic_inc(&vcc->stats->tx);
36356 + atomic_inc_unchecked(&vcc->stats->tx);
36357
36358 skb = skb_dequeue(&instance->sndqueue);
36359 }
36360 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36361 if (!left--)
36362 return sprintf(page,
36363 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36364 - atomic_read(&atm_dev->stats.aal5.tx),
36365 - atomic_read(&atm_dev->stats.aal5.tx_err),
36366 - atomic_read(&atm_dev->stats.aal5.rx),
36367 - atomic_read(&atm_dev->stats.aal5.rx_err),
36368 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36369 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36370 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36371 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36372 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36373 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36374
36375 if (!left--) {
36376 if (instance->disconnected)
36377 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36378 index d956965..4179a77 100644
36379 --- a/drivers/usb/core/devices.c
36380 +++ b/drivers/usb/core/devices.c
36381 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36382 * time it gets called.
36383 */
36384 static struct device_connect_event {
36385 - atomic_t count;
36386 + atomic_unchecked_t count;
36387 wait_queue_head_t wait;
36388 } device_event = {
36389 .count = ATOMIC_INIT(1),
36390 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36391
36392 void usbfs_conn_disc_event(void)
36393 {
36394 - atomic_add(2, &device_event.count);
36395 + atomic_add_unchecked(2, &device_event.count);
36396 wake_up(&device_event.wait);
36397 }
36398
36399 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36400
36401 poll_wait(file, &device_event.wait, wait);
36402
36403 - event_count = atomic_read(&device_event.count);
36404 + event_count = atomic_read_unchecked(&device_event.count);
36405 if (file->f_version != event_count) {
36406 file->f_version = event_count;
36407 return POLLIN | POLLRDNORM;
36408 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36409 index b3bdfed..a9460e0 100644
36410 --- a/drivers/usb/core/message.c
36411 +++ b/drivers/usb/core/message.c
36412 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36413 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36414 if (buf) {
36415 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36416 - if (len > 0) {
36417 - smallbuf = kmalloc(++len, GFP_NOIO);
36418 + if (len++ > 0) {
36419 + smallbuf = kmalloc(len, GFP_NOIO);
36420 if (!smallbuf)
36421 return buf;
36422 memcpy(smallbuf, buf, len);
36423 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36424 index 1fc8f12..20647c1 100644
36425 --- a/drivers/usb/early/ehci-dbgp.c
36426 +++ b/drivers/usb/early/ehci-dbgp.c
36427 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36428
36429 #ifdef CONFIG_KGDB
36430 static struct kgdb_io kgdbdbgp_io_ops;
36431 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36432 +static struct kgdb_io kgdbdbgp_io_ops_console;
36433 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36434 #else
36435 #define dbgp_kgdb_mode (0)
36436 #endif
36437 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36438 .write_char = kgdbdbgp_write_char,
36439 };
36440
36441 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36442 + .name = "kgdbdbgp",
36443 + .read_char = kgdbdbgp_read_char,
36444 + .write_char = kgdbdbgp_write_char,
36445 + .is_console = 1
36446 +};
36447 +
36448 static int kgdbdbgp_wait_time;
36449
36450 static int __init kgdbdbgp_parse_config(char *str)
36451 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36452 ptr++;
36453 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36454 }
36455 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36456 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36457 + if (early_dbgp_console.index != -1)
36458 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36459 + else
36460 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36461
36462 return 0;
36463 }
36464 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36465 index d6bea3e..60b250e 100644
36466 --- a/drivers/usb/wusbcore/wa-hc.h
36467 +++ b/drivers/usb/wusbcore/wa-hc.h
36468 @@ -192,7 +192,7 @@ struct wahc {
36469 struct list_head xfer_delayed_list;
36470 spinlock_t xfer_list_lock;
36471 struct work_struct xfer_work;
36472 - atomic_t xfer_id_count;
36473 + atomic_unchecked_t xfer_id_count;
36474 };
36475
36476
36477 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36478 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36479 spin_lock_init(&wa->xfer_list_lock);
36480 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36481 - atomic_set(&wa->xfer_id_count, 1);
36482 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36483 }
36484
36485 /**
36486 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36487 index 57c01ab..8a05959 100644
36488 --- a/drivers/usb/wusbcore/wa-xfer.c
36489 +++ b/drivers/usb/wusbcore/wa-xfer.c
36490 @@ -296,7 +296,7 @@ out:
36491 */
36492 static void wa_xfer_id_init(struct wa_xfer *xfer)
36493 {
36494 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36495 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36496 }
36497
36498 /*
36499 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36500 index c14c42b..f955cc2 100644
36501 --- a/drivers/vhost/vhost.c
36502 +++ b/drivers/vhost/vhost.c
36503 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36504 return 0;
36505 }
36506
36507 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36508 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36509 {
36510 struct file *eventfp, *filep = NULL,
36511 *pollstart = NULL, *pollstop = NULL;
36512 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36513 index b0b2ac3..89a4399 100644
36514 --- a/drivers/video/aty/aty128fb.c
36515 +++ b/drivers/video/aty/aty128fb.c
36516 @@ -148,7 +148,7 @@ enum {
36517 };
36518
36519 /* Must match above enum */
36520 -static const char *r128_family[] __devinitdata = {
36521 +static const char *r128_family[] __devinitconst = {
36522 "AGP",
36523 "PCI",
36524 "PRO AGP",
36525 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36526 index 5c3960d..15cf8fc 100644
36527 --- a/drivers/video/fbcmap.c
36528 +++ b/drivers/video/fbcmap.c
36529 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36530 rc = -ENODEV;
36531 goto out;
36532 }
36533 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36534 - !info->fbops->fb_setcmap)) {
36535 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36536 rc = -EINVAL;
36537 goto out1;
36538 }
36539 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36540 index ad93629..e020fc3 100644
36541 --- a/drivers/video/fbmem.c
36542 +++ b/drivers/video/fbmem.c
36543 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36544 image->dx += image->width + 8;
36545 }
36546 } else if (rotate == FB_ROTATE_UD) {
36547 - for (x = 0; x < num && image->dx >= 0; x++) {
36548 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36549 info->fbops->fb_imageblit(info, image);
36550 image->dx -= image->width + 8;
36551 }
36552 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36553 image->dy += image->height + 8;
36554 }
36555 } else if (rotate == FB_ROTATE_CCW) {
36556 - for (x = 0; x < num && image->dy >= 0; x++) {
36557 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36558 info->fbops->fb_imageblit(info, image);
36559 image->dy -= image->height + 8;
36560 }
36561 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36562 return -EFAULT;
36563 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36564 return -EINVAL;
36565 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36566 + if (con2fb.framebuffer >= FB_MAX)
36567 return -EINVAL;
36568 if (!registered_fb[con2fb.framebuffer])
36569 request_module("fb%d", con2fb.framebuffer);
36570 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36571 index 5a5d092..265c5ed 100644
36572 --- a/drivers/video/geode/gx1fb_core.c
36573 +++ b/drivers/video/geode/gx1fb_core.c
36574 @@ -29,7 +29,7 @@ static int crt_option = 1;
36575 static char panel_option[32] = "";
36576
36577 /* Modes relevant to the GX1 (taken from modedb.c) */
36578 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36579 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36580 /* 640x480-60 VESA */
36581 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36582 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36583 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36584 index 0fad23f..0e9afa4 100644
36585 --- a/drivers/video/gxt4500.c
36586 +++ b/drivers/video/gxt4500.c
36587 @@ -156,7 +156,7 @@ struct gxt4500_par {
36588 static char *mode_option;
36589
36590 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36591 -static const struct fb_videomode defaultmode __devinitdata = {
36592 +static const struct fb_videomode defaultmode __devinitconst = {
36593 .refresh = 60,
36594 .xres = 1280,
36595 .yres = 1024,
36596 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36597 return 0;
36598 }
36599
36600 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36601 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36602 .id = "IBM GXT4500P",
36603 .type = FB_TYPE_PACKED_PIXELS,
36604 .visual = FB_VISUAL_PSEUDOCOLOR,
36605 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36606 index 7672d2e..b56437f 100644
36607 --- a/drivers/video/i810/i810_accel.c
36608 +++ b/drivers/video/i810/i810_accel.c
36609 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36610 }
36611 }
36612 printk("ringbuffer lockup!!!\n");
36613 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36614 i810_report_error(mmio);
36615 par->dev_flags |= LOCKUP;
36616 info->pixmap.scan_align = 1;
36617 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36618 index 318f6fb..9a389c1 100644
36619 --- a/drivers/video/i810/i810_main.c
36620 +++ b/drivers/video/i810/i810_main.c
36621 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36622 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36623
36624 /* PCI */
36625 -static const char *i810_pci_list[] __devinitdata = {
36626 +static const char *i810_pci_list[] __devinitconst = {
36627 "Intel(R) 810 Framebuffer Device" ,
36628 "Intel(R) 810-DC100 Framebuffer Device" ,
36629 "Intel(R) 810E Framebuffer Device" ,
36630 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36631 index de36693..3c63fc2 100644
36632 --- a/drivers/video/jz4740_fb.c
36633 +++ b/drivers/video/jz4740_fb.c
36634 @@ -136,7 +136,7 @@ struct jzfb {
36635 uint32_t pseudo_palette[16];
36636 };
36637
36638 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36639 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36640 .id = "JZ4740 FB",
36641 .type = FB_TYPE_PACKED_PIXELS,
36642 .visual = FB_VISUAL_TRUECOLOR,
36643 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36644 index 3c14e43..eafa544 100644
36645 --- a/drivers/video/logo/logo_linux_clut224.ppm
36646 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36647 @@ -1,1604 +1,1123 @@
36648 P3
36649 -# Standard 224-color Linux logo
36650 80 80
36651 255
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 0 0 0 0 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 0 0 0
36656 - 0 0 0 0 0 0 0 0 0 0 0 0
36657 - 0 0 0 0 0 0 0 0 0 0 0 0
36658 - 0 0 0 0 0 0 0 0 0 0 0 0
36659 - 0 0 0 0 0 0 0 0 0 0 0 0
36660 - 0 0 0 0 0 0 0 0 0 0 0 0
36661 - 6 6 6 6 6 6 10 10 10 10 10 10
36662 - 10 10 10 6 6 6 6 6 6 6 6 6
36663 - 0 0 0 0 0 0 0 0 0 0 0 0
36664 - 0 0 0 0 0 0 0 0 0 0 0 0
36665 - 0 0 0 0 0 0 0 0 0 0 0 0
36666 - 0 0 0 0 0 0 0 0 0 0 0 0
36667 - 0 0 0 0 0 0 0 0 0 0 0 0
36668 - 0 0 0 0 0 0 0 0 0 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 0 0 0 0 0 0 0 0 0 0 0 0
36671 - 0 0 0 0 0 0 0 0 0 0 0 0
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 0 0 0 0 0 0 0 0 0
36677 - 0 0 0 0 0 0 0 0 0 0 0 0
36678 - 0 0 0 0 0 0 0 0 0 0 0 0
36679 - 0 0 0 0 0 0 0 0 0 0 0 0
36680 - 0 0 0 6 6 6 10 10 10 14 14 14
36681 - 22 22 22 26 26 26 30 30 30 34 34 34
36682 - 30 30 30 30 30 30 26 26 26 18 18 18
36683 - 14 14 14 10 10 10 6 6 6 0 0 0
36684 - 0 0 0 0 0 0 0 0 0 0 0 0
36685 - 0 0 0 0 0 0 0 0 0 0 0 0
36686 - 0 0 0 0 0 0 0 0 0 0 0 0
36687 - 0 0 0 0 0 0 0 0 0 0 0 0
36688 - 0 0 0 0 0 0 0 0 0 0 0 0
36689 - 0 0 0 0 0 0 0 0 0 0 0 0
36690 - 0 0 0 0 0 0 0 0 0 0 0 0
36691 - 0 0 0 0 0 0 0 0 0 0 0 0
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 1 0 0 1 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 0 0 0 0 0 0 0 0 0 0 0 0
36697 - 0 0 0 0 0 0 0 0 0 0 0 0
36698 - 0 0 0 0 0 0 0 0 0 0 0 0
36699 - 0 0 0 0 0 0 0 0 0 0 0 0
36700 - 6 6 6 14 14 14 26 26 26 42 42 42
36701 - 54 54 54 66 66 66 78 78 78 78 78 78
36702 - 78 78 78 74 74 74 66 66 66 54 54 54
36703 - 42 42 42 26 26 26 18 18 18 10 10 10
36704 - 6 6 6 0 0 0 0 0 0 0 0 0
36705 - 0 0 0 0 0 0 0 0 0 0 0 0
36706 - 0 0 0 0 0 0 0 0 0 0 0 0
36707 - 0 0 0 0 0 0 0 0 0 0 0 0
36708 - 0 0 0 0 0 0 0 0 0 0 0 0
36709 - 0 0 0 0 0 0 0 0 0 0 0 0
36710 - 0 0 0 0 0 0 0 0 0 0 0 0
36711 - 0 0 0 0 0 0 0 0 0 0 0 0
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 1 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 0 0 0 0 0 0 0 0 0 0 0 0
36717 - 0 0 0 0 0 0 0 0 0 0 0 0
36718 - 0 0 0 0 0 0 0 0 0 0 0 0
36719 - 0 0 0 0 0 0 0 0 0 10 10 10
36720 - 22 22 22 42 42 42 66 66 66 86 86 86
36721 - 66 66 66 38 38 38 38 38 38 22 22 22
36722 - 26 26 26 34 34 34 54 54 54 66 66 66
36723 - 86 86 86 70 70 70 46 46 46 26 26 26
36724 - 14 14 14 6 6 6 0 0 0 0 0 0
36725 - 0 0 0 0 0 0 0 0 0 0 0 0
36726 - 0 0 0 0 0 0 0 0 0 0 0 0
36727 - 0 0 0 0 0 0 0 0 0 0 0 0
36728 - 0 0 0 0 0 0 0 0 0 0 0 0
36729 - 0 0 0 0 0 0 0 0 0 0 0 0
36730 - 0 0 0 0 0 0 0 0 0 0 0 0
36731 - 0 0 0 0 0 0 0 0 0 0 0 0
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 1 0 0 1 0 0 1 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 0 0 0 0 0 0 0 0 0 0 0 0
36737 - 0 0 0 0 0 0 0 0 0 0 0 0
36738 - 0 0 0 0 0 0 0 0 0 0 0 0
36739 - 0 0 0 0 0 0 10 10 10 26 26 26
36740 - 50 50 50 82 82 82 58 58 58 6 6 6
36741 - 2 2 6 2 2 6 2 2 6 2 2 6
36742 - 2 2 6 2 2 6 2 2 6 2 2 6
36743 - 6 6 6 54 54 54 86 86 86 66 66 66
36744 - 38 38 38 18 18 18 6 6 6 0 0 0
36745 - 0 0 0 0 0 0 0 0 0 0 0 0
36746 - 0 0 0 0 0 0 0 0 0 0 0 0
36747 - 0 0 0 0 0 0 0 0 0 0 0 0
36748 - 0 0 0 0 0 0 0 0 0 0 0 0
36749 - 0 0 0 0 0 0 0 0 0 0 0 0
36750 - 0 0 0 0 0 0 0 0 0 0 0 0
36751 - 0 0 0 0 0 0 0 0 0 0 0 0
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 0 0 0
36756 - 0 0 0 0 0 0 0 0 0 0 0 0
36757 - 0 0 0 0 0 0 0 0 0 0 0 0
36758 - 0 0 0 0 0 0 0 0 0 0 0 0
36759 - 0 0 0 6 6 6 22 22 22 50 50 50
36760 - 78 78 78 34 34 34 2 2 6 2 2 6
36761 - 2 2 6 2 2 6 2 2 6 2 2 6
36762 - 2 2 6 2 2 6 2 2 6 2 2 6
36763 - 2 2 6 2 2 6 6 6 6 70 70 70
36764 - 78 78 78 46 46 46 22 22 22 6 6 6
36765 - 0 0 0 0 0 0 0 0 0 0 0 0
36766 - 0 0 0 0 0 0 0 0 0 0 0 0
36767 - 0 0 0 0 0 0 0 0 0 0 0 0
36768 - 0 0 0 0 0 0 0 0 0 0 0 0
36769 - 0 0 0 0 0 0 0 0 0 0 0 0
36770 - 0 0 0 0 0 0 0 0 0 0 0 0
36771 - 0 0 0 0 0 0 0 0 0 0 0 0
36772 - 0 0 0 0 0 0 0 0 0 0 0 0
36773 - 0 0 1 0 0 1 0 0 1 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 0 0 0
36776 - 0 0 0 0 0 0 0 0 0 0 0 0
36777 - 0 0 0 0 0 0 0 0 0 0 0 0
36778 - 0 0 0 0 0 0 0 0 0 0 0 0
36779 - 6 6 6 18 18 18 42 42 42 82 82 82
36780 - 26 26 26 2 2 6 2 2 6 2 2 6
36781 - 2 2 6 2 2 6 2 2 6 2 2 6
36782 - 2 2 6 2 2 6 2 2 6 14 14 14
36783 - 46 46 46 34 34 34 6 6 6 2 2 6
36784 - 42 42 42 78 78 78 42 42 42 18 18 18
36785 - 6 6 6 0 0 0 0 0 0 0 0 0
36786 - 0 0 0 0 0 0 0 0 0 0 0 0
36787 - 0 0 0 0 0 0 0 0 0 0 0 0
36788 - 0 0 0 0 0 0 0 0 0 0 0 0
36789 - 0 0 0 0 0 0 0 0 0 0 0 0
36790 - 0 0 0 0 0 0 0 0 0 0 0 0
36791 - 0 0 0 0 0 0 0 0 0 0 0 0
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 1 0 0 0 0 0 1 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 0 0 0
36796 - 0 0 0 0 0 0 0 0 0 0 0 0
36797 - 0 0 0 0 0 0 0 0 0 0 0 0
36798 - 0 0 0 0 0 0 0 0 0 0 0 0
36799 - 10 10 10 30 30 30 66 66 66 58 58 58
36800 - 2 2 6 2 2 6 2 2 6 2 2 6
36801 - 2 2 6 2 2 6 2 2 6 2 2 6
36802 - 2 2 6 2 2 6 2 2 6 26 26 26
36803 - 86 86 86 101 101 101 46 46 46 10 10 10
36804 - 2 2 6 58 58 58 70 70 70 34 34 34
36805 - 10 10 10 0 0 0 0 0 0 0 0 0
36806 - 0 0 0 0 0 0 0 0 0 0 0 0
36807 - 0 0 0 0 0 0 0 0 0 0 0 0
36808 - 0 0 0 0 0 0 0 0 0 0 0 0
36809 - 0 0 0 0 0 0 0 0 0 0 0 0
36810 - 0 0 0 0 0 0 0 0 0 0 0 0
36811 - 0 0 0 0 0 0 0 0 0 0 0 0
36812 - 0 0 0 0 0 0 0 0 0 0 0 0
36813 - 0 0 1 0 0 1 0 0 1 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 0 0 0
36816 - 0 0 0 0 0 0 0 0 0 0 0 0
36817 - 0 0 0 0 0 0 0 0 0 0 0 0
36818 - 0 0 0 0 0 0 0 0 0 0 0 0
36819 - 14 14 14 42 42 42 86 86 86 10 10 10
36820 - 2 2 6 2 2 6 2 2 6 2 2 6
36821 - 2 2 6 2 2 6 2 2 6 2 2 6
36822 - 2 2 6 2 2 6 2 2 6 30 30 30
36823 - 94 94 94 94 94 94 58 58 58 26 26 26
36824 - 2 2 6 6 6 6 78 78 78 54 54 54
36825 - 22 22 22 6 6 6 0 0 0 0 0 0
36826 - 0 0 0 0 0 0 0 0 0 0 0 0
36827 - 0 0 0 0 0 0 0 0 0 0 0 0
36828 - 0 0 0 0 0 0 0 0 0 0 0 0
36829 - 0 0 0 0 0 0 0 0 0 0 0 0
36830 - 0 0 0 0 0 0 0 0 0 0 0 0
36831 - 0 0 0 0 0 0 0 0 0 0 0 0
36832 - 0 0 0 0 0 0 0 0 0 0 0 0
36833 - 0 0 0 0 0 0 0 0 0 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 0 0 0
36836 - 0 0 0 0 0 0 0 0 0 0 0 0
36837 - 0 0 0 0 0 0 0 0 0 0 0 0
36838 - 0 0 0 0 0 0 0 0 0 6 6 6
36839 - 22 22 22 62 62 62 62 62 62 2 2 6
36840 - 2 2 6 2 2 6 2 2 6 2 2 6
36841 - 2 2 6 2 2 6 2 2 6 2 2 6
36842 - 2 2 6 2 2 6 2 2 6 26 26 26
36843 - 54 54 54 38 38 38 18 18 18 10 10 10
36844 - 2 2 6 2 2 6 34 34 34 82 82 82
36845 - 38 38 38 14 14 14 0 0 0 0 0 0
36846 - 0 0 0 0 0 0 0 0 0 0 0 0
36847 - 0 0 0 0 0 0 0 0 0 0 0 0
36848 - 0 0 0 0 0 0 0 0 0 0 0 0
36849 - 0 0 0 0 0 0 0 0 0 0 0 0
36850 - 0 0 0 0 0 0 0 0 0 0 0 0
36851 - 0 0 0 0 0 0 0 0 0 0 0 0
36852 - 0 0 0 0 0 0 0 0 0 0 0 0
36853 - 0 0 0 0 0 1 0 0 1 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 0 0 0
36856 - 0 0 0 0 0 0 0 0 0 0 0 0
36857 - 0 0 0 0 0 0 0 0 0 0 0 0
36858 - 0 0 0 0 0 0 0 0 0 6 6 6
36859 - 30 30 30 78 78 78 30 30 30 2 2 6
36860 - 2 2 6 2 2 6 2 2 6 2 2 6
36861 - 2 2 6 2 2 6 2 2 6 2 2 6
36862 - 2 2 6 2 2 6 2 2 6 10 10 10
36863 - 10 10 10 2 2 6 2 2 6 2 2 6
36864 - 2 2 6 2 2 6 2 2 6 78 78 78
36865 - 50 50 50 18 18 18 6 6 6 0 0 0
36866 - 0 0 0 0 0 0 0 0 0 0 0 0
36867 - 0 0 0 0 0 0 0 0 0 0 0 0
36868 - 0 0 0 0 0 0 0 0 0 0 0 0
36869 - 0 0 0 0 0 0 0 0 0 0 0 0
36870 - 0 0 0 0 0 0 0 0 0 0 0 0
36871 - 0 0 0 0 0 0 0 0 0 0 0 0
36872 - 0 0 0 0 0 0 0 0 0 0 0 0
36873 - 0 0 1 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 0 0 0
36876 - 0 0 0 0 0 0 0 0 0 0 0 0
36877 - 0 0 0 0 0 0 0 0 0 0 0 0
36878 - 0 0 0 0 0 0 0 0 0 10 10 10
36879 - 38 38 38 86 86 86 14 14 14 2 2 6
36880 - 2 2 6 2 2 6 2 2 6 2 2 6
36881 - 2 2 6 2 2 6 2 2 6 2 2 6
36882 - 2 2 6 2 2 6 2 2 6 2 2 6
36883 - 2 2 6 2 2 6 2 2 6 2 2 6
36884 - 2 2 6 2 2 6 2 2 6 54 54 54
36885 - 66 66 66 26 26 26 6 6 6 0 0 0
36886 - 0 0 0 0 0 0 0 0 0 0 0 0
36887 - 0 0 0 0 0 0 0 0 0 0 0 0
36888 - 0 0 0 0 0 0 0 0 0 0 0 0
36889 - 0 0 0 0 0 0 0 0 0 0 0 0
36890 - 0 0 0 0 0 0 0 0 0 0 0 0
36891 - 0 0 0 0 0 0 0 0 0 0 0 0
36892 - 0 0 0 0 0 0 0 0 0 0 0 0
36893 - 0 0 0 0 0 1 0 0 1 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 0 0 0
36896 - 0 0 0 0 0 0 0 0 0 0 0 0
36897 - 0 0 0 0 0 0 0 0 0 0 0 0
36898 - 0 0 0 0 0 0 0 0 0 14 14 14
36899 - 42 42 42 82 82 82 2 2 6 2 2 6
36900 - 2 2 6 6 6 6 10 10 10 2 2 6
36901 - 2 2 6 2 2 6 2 2 6 2 2 6
36902 - 2 2 6 2 2 6 2 2 6 6 6 6
36903 - 14 14 14 10 10 10 2 2 6 2 2 6
36904 - 2 2 6 2 2 6 2 2 6 18 18 18
36905 - 82 82 82 34 34 34 10 10 10 0 0 0
36906 - 0 0 0 0 0 0 0 0 0 0 0 0
36907 - 0 0 0 0 0 0 0 0 0 0 0 0
36908 - 0 0 0 0 0 0 0 0 0 0 0 0
36909 - 0 0 0 0 0 0 0 0 0 0 0 0
36910 - 0 0 0 0 0 0 0 0 0 0 0 0
36911 - 0 0 0 0 0 0 0 0 0 0 0 0
36912 - 0 0 0 0 0 0 0 0 0 0 0 0
36913 - 0 0 1 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 0 0 0
36916 - 0 0 0 0 0 0 0 0 0 0 0 0
36917 - 0 0 0 0 0 0 0 0 0 0 0 0
36918 - 0 0 0 0 0 0 0 0 0 14 14 14
36919 - 46 46 46 86 86 86 2 2 6 2 2 6
36920 - 6 6 6 6 6 6 22 22 22 34 34 34
36921 - 6 6 6 2 2 6 2 2 6 2 2 6
36922 - 2 2 6 2 2 6 18 18 18 34 34 34
36923 - 10 10 10 50 50 50 22 22 22 2 2 6
36924 - 2 2 6 2 2 6 2 2 6 10 10 10
36925 - 86 86 86 42 42 42 14 14 14 0 0 0
36926 - 0 0 0 0 0 0 0 0 0 0 0 0
36927 - 0 0 0 0 0 0 0 0 0 0 0 0
36928 - 0 0 0 0 0 0 0 0 0 0 0 0
36929 - 0 0 0 0 0 0 0 0 0 0 0 0
36930 - 0 0 0 0 0 0 0 0 0 0 0 0
36931 - 0 0 0 0 0 0 0 0 0 0 0 0
36932 - 0 0 0 0 0 0 0 0 0 0 0 0
36933 - 0 0 1 0 0 1 0 0 1 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 0 0 0
36936 - 0 0 0 0 0 0 0 0 0 0 0 0
36937 - 0 0 0 0 0 0 0 0 0 0 0 0
36938 - 0 0 0 0 0 0 0 0 0 14 14 14
36939 - 46 46 46 86 86 86 2 2 6 2 2 6
36940 - 38 38 38 116 116 116 94 94 94 22 22 22
36941 - 22 22 22 2 2 6 2 2 6 2 2 6
36942 - 14 14 14 86 86 86 138 138 138 162 162 162
36943 -154 154 154 38 38 38 26 26 26 6 6 6
36944 - 2 2 6 2 2 6 2 2 6 2 2 6
36945 - 86 86 86 46 46 46 14 14 14 0 0 0
36946 - 0 0 0 0 0 0 0 0 0 0 0 0
36947 - 0 0 0 0 0 0 0 0 0 0 0 0
36948 - 0 0 0 0 0 0 0 0 0 0 0 0
36949 - 0 0 0 0 0 0 0 0 0 0 0 0
36950 - 0 0 0 0 0 0 0 0 0 0 0 0
36951 - 0 0 0 0 0 0 0 0 0 0 0 0
36952 - 0 0 0 0 0 0 0 0 0 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 0 0 0
36956 - 0 0 0 0 0 0 0 0 0 0 0 0
36957 - 0 0 0 0 0 0 0 0 0 0 0 0
36958 - 0 0 0 0 0 0 0 0 0 14 14 14
36959 - 46 46 46 86 86 86 2 2 6 14 14 14
36960 -134 134 134 198 198 198 195 195 195 116 116 116
36961 - 10 10 10 2 2 6 2 2 6 6 6 6
36962 -101 98 89 187 187 187 210 210 210 218 218 218
36963 -214 214 214 134 134 134 14 14 14 6 6 6
36964 - 2 2 6 2 2 6 2 2 6 2 2 6
36965 - 86 86 86 50 50 50 18 18 18 6 6 6
36966 - 0 0 0 0 0 0 0 0 0 0 0 0
36967 - 0 0 0 0 0 0 0 0 0 0 0 0
36968 - 0 0 0 0 0 0 0 0 0 0 0 0
36969 - 0 0 0 0 0 0 0 0 0 0 0 0
36970 - 0 0 0 0 0 0 0 0 0 0 0 0
36971 - 0 0 0 0 0 0 0 0 0 0 0 0
36972 - 0 0 0 0 0 0 0 0 1 0 0 0
36973 - 0 0 1 0 0 1 0 0 1 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 0 0 0
36975 - 0 0 0 0 0 0 0 0 0 0 0 0
36976 - 0 0 0 0 0 0 0 0 0 0 0 0
36977 - 0 0 0 0 0 0 0 0 0 0 0 0
36978 - 0 0 0 0 0 0 0 0 0 14 14 14
36979 - 46 46 46 86 86 86 2 2 6 54 54 54
36980 -218 218 218 195 195 195 226 226 226 246 246 246
36981 - 58 58 58 2 2 6 2 2 6 30 30 30
36982 -210 210 210 253 253 253 174 174 174 123 123 123
36983 -221 221 221 234 234 234 74 74 74 2 2 6
36984 - 2 2 6 2 2 6 2 2 6 2 2 6
36985 - 70 70 70 58 58 58 22 22 22 6 6 6
36986 - 0 0 0 0 0 0 0 0 0 0 0 0
36987 - 0 0 0 0 0 0 0 0 0 0 0 0
36988 - 0 0 0 0 0 0 0 0 0 0 0 0
36989 - 0 0 0 0 0 0 0 0 0 0 0 0
36990 - 0 0 0 0 0 0 0 0 0 0 0 0
36991 - 0 0 0 0 0 0 0 0 0 0 0 0
36992 - 0 0 0 0 0 0 0 0 0 0 0 0
36993 - 0 0 0 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 0 0 0 0 0 0
36995 - 0 0 0 0 0 0 0 0 0 0 0 0
36996 - 0 0 0 0 0 0 0 0 0 0 0 0
36997 - 0 0 0 0 0 0 0 0 0 0 0 0
36998 - 0 0 0 0 0 0 0 0 0 14 14 14
36999 - 46 46 46 82 82 82 2 2 6 106 106 106
37000 -170 170 170 26 26 26 86 86 86 226 226 226
37001 -123 123 123 10 10 10 14 14 14 46 46 46
37002 -231 231 231 190 190 190 6 6 6 70 70 70
37003 - 90 90 90 238 238 238 158 158 158 2 2 6
37004 - 2 2 6 2 2 6 2 2 6 2 2 6
37005 - 70 70 70 58 58 58 22 22 22 6 6 6
37006 - 0 0 0 0 0 0 0 0 0 0 0 0
37007 - 0 0 0 0 0 0 0 0 0 0 0 0
37008 - 0 0 0 0 0 0 0 0 0 0 0 0
37009 - 0 0 0 0 0 0 0 0 0 0 0 0
37010 - 0 0 0 0 0 0 0 0 0 0 0 0
37011 - 0 0 0 0 0 0 0 0 0 0 0 0
37012 - 0 0 0 0 0 0 0 0 1 0 0 0
37013 - 0 0 1 0 0 1 0 0 1 0 0 0
37014 - 0 0 0 0 0 0 0 0 0 0 0 0
37015 - 0 0 0 0 0 0 0 0 0 0 0 0
37016 - 0 0 0 0 0 0 0 0 0 0 0 0
37017 - 0 0 0 0 0 0 0 0 0 0 0 0
37018 - 0 0 0 0 0 0 0 0 0 14 14 14
37019 - 42 42 42 86 86 86 6 6 6 116 116 116
37020 -106 106 106 6 6 6 70 70 70 149 149 149
37021 -128 128 128 18 18 18 38 38 38 54 54 54
37022 -221 221 221 106 106 106 2 2 6 14 14 14
37023 - 46 46 46 190 190 190 198 198 198 2 2 6
37024 - 2 2 6 2 2 6 2 2 6 2 2 6
37025 - 74 74 74 62 62 62 22 22 22 6 6 6
37026 - 0 0 0 0 0 0 0 0 0 0 0 0
37027 - 0 0 0 0 0 0 0 0 0 0 0 0
37028 - 0 0 0 0 0 0 0 0 0 0 0 0
37029 - 0 0 0 0 0 0 0 0 0 0 0 0
37030 - 0 0 0 0 0 0 0 0 0 0 0 0
37031 - 0 0 0 0 0 0 0 0 0 0 0 0
37032 - 0 0 0 0 0 0 0 0 1 0 0 0
37033 - 0 0 1 0 0 0 0 0 1 0 0 0
37034 - 0 0 0 0 0 0 0 0 0 0 0 0
37035 - 0 0 0 0 0 0 0 0 0 0 0 0
37036 - 0 0 0 0 0 0 0 0 0 0 0 0
37037 - 0 0 0 0 0 0 0 0 0 0 0 0
37038 - 0 0 0 0 0 0 0 0 0 14 14 14
37039 - 42 42 42 94 94 94 14 14 14 101 101 101
37040 -128 128 128 2 2 6 18 18 18 116 116 116
37041 -118 98 46 121 92 8 121 92 8 98 78 10
37042 -162 162 162 106 106 106 2 2 6 2 2 6
37043 - 2 2 6 195 195 195 195 195 195 6 6 6
37044 - 2 2 6 2 2 6 2 2 6 2 2 6
37045 - 74 74 74 62 62 62 22 22 22 6 6 6
37046 - 0 0 0 0 0 0 0 0 0 0 0 0
37047 - 0 0 0 0 0 0 0 0 0 0 0 0
37048 - 0 0 0 0 0 0 0 0 0 0 0 0
37049 - 0 0 0 0 0 0 0 0 0 0 0 0
37050 - 0 0 0 0 0 0 0 0 0 0 0 0
37051 - 0 0 0 0 0 0 0 0 0 0 0 0
37052 - 0 0 0 0 0 0 0 0 1 0 0 1
37053 - 0 0 1 0 0 0 0 0 1 0 0 0
37054 - 0 0 0 0 0 0 0 0 0 0 0 0
37055 - 0 0 0 0 0 0 0 0 0 0 0 0
37056 - 0 0 0 0 0 0 0 0 0 0 0 0
37057 - 0 0 0 0 0 0 0 0 0 0 0 0
37058 - 0 0 0 0 0 0 0 0 0 10 10 10
37059 - 38 38 38 90 90 90 14 14 14 58 58 58
37060 -210 210 210 26 26 26 54 38 6 154 114 10
37061 -226 170 11 236 186 11 225 175 15 184 144 12
37062 -215 174 15 175 146 61 37 26 9 2 2 6
37063 - 70 70 70 246 246 246 138 138 138 2 2 6
37064 - 2 2 6 2 2 6 2 2 6 2 2 6
37065 - 70 70 70 66 66 66 26 26 26 6 6 6
37066 - 0 0 0 0 0 0 0 0 0 0 0 0
37067 - 0 0 0 0 0 0 0 0 0 0 0 0
37068 - 0 0 0 0 0 0 0 0 0 0 0 0
37069 - 0 0 0 0 0 0 0 0 0 0 0 0
37070 - 0 0 0 0 0 0 0 0 0 0 0 0
37071 - 0 0 0 0 0 0 0 0 0 0 0 0
37072 - 0 0 0 0 0 0 0 0 0 0 0 0
37073 - 0 0 0 0 0 0 0 0 0 0 0 0
37074 - 0 0 0 0 0 0 0 0 0 0 0 0
37075 - 0 0 0 0 0 0 0 0 0 0 0 0
37076 - 0 0 0 0 0 0 0 0 0 0 0 0
37077 - 0 0 0 0 0 0 0 0 0 0 0 0
37078 - 0 0 0 0 0 0 0 0 0 10 10 10
37079 - 38 38 38 86 86 86 14 14 14 10 10 10
37080 -195 195 195 188 164 115 192 133 9 225 175 15
37081 -239 182 13 234 190 10 232 195 16 232 200 30
37082 -245 207 45 241 208 19 232 195 16 184 144 12
37083 -218 194 134 211 206 186 42 42 42 2 2 6
37084 - 2 2 6 2 2 6 2 2 6 2 2 6
37085 - 50 50 50 74 74 74 30 30 30 6 6 6
37086 - 0 0 0 0 0 0 0 0 0 0 0 0
37087 - 0 0 0 0 0 0 0 0 0 0 0 0
37088 - 0 0 0 0 0 0 0 0 0 0 0 0
37089 - 0 0 0 0 0 0 0 0 0 0 0 0
37090 - 0 0 0 0 0 0 0 0 0 0 0 0
37091 - 0 0 0 0 0 0 0 0 0 0 0 0
37092 - 0 0 0 0 0 0 0 0 0 0 0 0
37093 - 0 0 0 0 0 0 0 0 0 0 0 0
37094 - 0 0 0 0 0 0 0 0 0 0 0 0
37095 - 0 0 0 0 0 0 0 0 0 0 0 0
37096 - 0 0 0 0 0 0 0 0 0 0 0 0
37097 - 0 0 0 0 0 0 0 0 0 0 0 0
37098 - 0 0 0 0 0 0 0 0 0 10 10 10
37099 - 34 34 34 86 86 86 14 14 14 2 2 6
37100 -121 87 25 192 133 9 219 162 10 239 182 13
37101 -236 186 11 232 195 16 241 208 19 244 214 54
37102 -246 218 60 246 218 38 246 215 20 241 208 19
37103 -241 208 19 226 184 13 121 87 25 2 2 6
37104 - 2 2 6 2 2 6 2 2 6 2 2 6
37105 - 50 50 50 82 82 82 34 34 34 10 10 10
37106 - 0 0 0 0 0 0 0 0 0 0 0 0
37107 - 0 0 0 0 0 0 0 0 0 0 0 0
37108 - 0 0 0 0 0 0 0 0 0 0 0 0
37109 - 0 0 0 0 0 0 0 0 0 0 0 0
37110 - 0 0 0 0 0 0 0 0 0 0 0 0
37111 - 0 0 0 0 0 0 0 0 0 0 0 0
37112 - 0 0 0 0 0 0 0 0 0 0 0 0
37113 - 0 0 0 0 0 0 0 0 0 0 0 0
37114 - 0 0 0 0 0 0 0 0 0 0 0 0
37115 - 0 0 0 0 0 0 0 0 0 0 0 0
37116 - 0 0 0 0 0 0 0 0 0 0 0 0
37117 - 0 0 0 0 0 0 0 0 0 0 0 0
37118 - 0 0 0 0 0 0 0 0 0 10 10 10
37119 - 34 34 34 82 82 82 30 30 30 61 42 6
37120 -180 123 7 206 145 10 230 174 11 239 182 13
37121 -234 190 10 238 202 15 241 208 19 246 218 74
37122 -246 218 38 246 215 20 246 215 20 246 215 20
37123 -226 184 13 215 174 15 184 144 12 6 6 6
37124 - 2 2 6 2 2 6 2 2 6 2 2 6
37125 - 26 26 26 94 94 94 42 42 42 14 14 14
37126 - 0 0 0 0 0 0 0 0 0 0 0 0
37127 - 0 0 0 0 0 0 0 0 0 0 0 0
37128 - 0 0 0 0 0 0 0 0 0 0 0 0
37129 - 0 0 0 0 0 0 0 0 0 0 0 0
37130 - 0 0 0 0 0 0 0 0 0 0 0 0
37131 - 0 0 0 0 0 0 0 0 0 0 0 0
37132 - 0 0 0 0 0 0 0 0 0 0 0 0
37133 - 0 0 0 0 0 0 0 0 0 0 0 0
37134 - 0 0 0 0 0 0 0 0 0 0 0 0
37135 - 0 0 0 0 0 0 0 0 0 0 0 0
37136 - 0 0 0 0 0 0 0 0 0 0 0 0
37137 - 0 0 0 0 0 0 0 0 0 0 0 0
37138 - 0 0 0 0 0 0 0 0 0 10 10 10
37139 - 30 30 30 78 78 78 50 50 50 104 69 6
37140 -192 133 9 216 158 10 236 178 12 236 186 11
37141 -232 195 16 241 208 19 244 214 54 245 215 43
37142 -246 215 20 246 215 20 241 208 19 198 155 10
37143 -200 144 11 216 158 10 156 118 10 2 2 6
37144 - 2 2 6 2 2 6 2 2 6 2 2 6
37145 - 6 6 6 90 90 90 54 54 54 18 18 18
37146 - 6 6 6 0 0 0 0 0 0 0 0 0
37147 - 0 0 0 0 0 0 0 0 0 0 0 0
37148 - 0 0 0 0 0 0 0 0 0 0 0 0
37149 - 0 0 0 0 0 0 0 0 0 0 0 0
37150 - 0 0 0 0 0 0 0 0 0 0 0 0
37151 - 0 0 0 0 0 0 0 0 0 0 0 0
37152 - 0 0 0 0 0 0 0 0 0 0 0 0
37153 - 0 0 0 0 0 0 0 0 0 0 0 0
37154 - 0 0 0 0 0 0 0 0 0 0 0 0
37155 - 0 0 0 0 0 0 0 0 0 0 0 0
37156 - 0 0 0 0 0 0 0 0 0 0 0 0
37157 - 0 0 0 0 0 0 0 0 0 0 0 0
37158 - 0 0 0 0 0 0 0 0 0 10 10 10
37159 - 30 30 30 78 78 78 46 46 46 22 22 22
37160 -137 92 6 210 162 10 239 182 13 238 190 10
37161 -238 202 15 241 208 19 246 215 20 246 215 20
37162 -241 208 19 203 166 17 185 133 11 210 150 10
37163 -216 158 10 210 150 10 102 78 10 2 2 6
37164 - 6 6 6 54 54 54 14 14 14 2 2 6
37165 - 2 2 6 62 62 62 74 74 74 30 30 30
37166 - 10 10 10 0 0 0 0 0 0 0 0 0
37167 - 0 0 0 0 0 0 0 0 0 0 0 0
37168 - 0 0 0 0 0 0 0 0 0 0 0 0
37169 - 0 0 0 0 0 0 0 0 0 0 0 0
37170 - 0 0 0 0 0 0 0 0 0 0 0 0
37171 - 0 0 0 0 0 0 0 0 0 0 0 0
37172 - 0 0 0 0 0 0 0 0 0 0 0 0
37173 - 0 0 0 0 0 0 0 0 0 0 0 0
37174 - 0 0 0 0 0 0 0 0 0 0 0 0
37175 - 0 0 0 0 0 0 0 0 0 0 0 0
37176 - 0 0 0 0 0 0 0 0 0 0 0 0
37177 - 0 0 0 0 0 0 0 0 0 0 0 0
37178 - 0 0 0 0 0 0 0 0 0 10 10 10
37179 - 34 34 34 78 78 78 50 50 50 6 6 6
37180 - 94 70 30 139 102 15 190 146 13 226 184 13
37181 -232 200 30 232 195 16 215 174 15 190 146 13
37182 -168 122 10 192 133 9 210 150 10 213 154 11
37183 -202 150 34 182 157 106 101 98 89 2 2 6
37184 - 2 2 6 78 78 78 116 116 116 58 58 58
37185 - 2 2 6 22 22 22 90 90 90 46 46 46
37186 - 18 18 18 6 6 6 0 0 0 0 0 0
37187 - 0 0 0 0 0 0 0 0 0 0 0 0
37188 - 0 0 0 0 0 0 0 0 0 0 0 0
37189 - 0 0 0 0 0 0 0 0 0 0 0 0
37190 - 0 0 0 0 0 0 0 0 0 0 0 0
37191 - 0 0 0 0 0 0 0 0 0 0 0 0
37192 - 0 0 0 0 0 0 0 0 0 0 0 0
37193 - 0 0 0 0 0 0 0 0 0 0 0 0
37194 - 0 0 0 0 0 0 0 0 0 0 0 0
37195 - 0 0 0 0 0 0 0 0 0 0 0 0
37196 - 0 0 0 0 0 0 0 0 0 0 0 0
37197 - 0 0 0 0 0 0 0 0 0 0 0 0
37198 - 0 0 0 0 0 0 0 0 0 10 10 10
37199 - 38 38 38 86 86 86 50 50 50 6 6 6
37200 -128 128 128 174 154 114 156 107 11 168 122 10
37201 -198 155 10 184 144 12 197 138 11 200 144 11
37202 -206 145 10 206 145 10 197 138 11 188 164 115
37203 -195 195 195 198 198 198 174 174 174 14 14 14
37204 - 2 2 6 22 22 22 116 116 116 116 116 116
37205 - 22 22 22 2 2 6 74 74 74 70 70 70
37206 - 30 30 30 10 10 10 0 0 0 0 0 0
37207 - 0 0 0 0 0 0 0 0 0 0 0 0
37208 - 0 0 0 0 0 0 0 0 0 0 0 0
37209 - 0 0 0 0 0 0 0 0 0 0 0 0
37210 - 0 0 0 0 0 0 0 0 0 0 0 0
37211 - 0 0 0 0 0 0 0 0 0 0 0 0
37212 - 0 0 0 0 0 0 0 0 0 0 0 0
37213 - 0 0 0 0 0 0 0 0 0 0 0 0
37214 - 0 0 0 0 0 0 0 0 0 0 0 0
37215 - 0 0 0 0 0 0 0 0 0 0 0 0
37216 - 0 0 0 0 0 0 0 0 0 0 0 0
37217 - 0 0 0 0 0 0 0 0 0 0 0 0
37218 - 0 0 0 0 0 0 6 6 6 18 18 18
37219 - 50 50 50 101 101 101 26 26 26 10 10 10
37220 -138 138 138 190 190 190 174 154 114 156 107 11
37221 -197 138 11 200 144 11 197 138 11 192 133 9
37222 -180 123 7 190 142 34 190 178 144 187 187 187
37223 -202 202 202 221 221 221 214 214 214 66 66 66
37224 - 2 2 6 2 2 6 50 50 50 62 62 62
37225 - 6 6 6 2 2 6 10 10 10 90 90 90
37226 - 50 50 50 18 18 18 6 6 6 0 0 0
37227 - 0 0 0 0 0 0 0 0 0 0 0 0
37228 - 0 0 0 0 0 0 0 0 0 0 0 0
37229 - 0 0 0 0 0 0 0 0 0 0 0 0
37230 - 0 0 0 0 0 0 0 0 0 0 0 0
37231 - 0 0 0 0 0 0 0 0 0 0 0 0
37232 - 0 0 0 0 0 0 0 0 0 0 0 0
37233 - 0 0 0 0 0 0 0 0 0 0 0 0
37234 - 0 0 0 0 0 0 0 0 0 0 0 0
37235 - 0 0 0 0 0 0 0 0 0 0 0 0
37236 - 0 0 0 0 0 0 0 0 0 0 0 0
37237 - 0 0 0 0 0 0 0 0 0 0 0 0
37238 - 0 0 0 0 0 0 10 10 10 34 34 34
37239 - 74 74 74 74 74 74 2 2 6 6 6 6
37240 -144 144 144 198 198 198 190 190 190 178 166 146
37241 -154 121 60 156 107 11 156 107 11 168 124 44
37242 -174 154 114 187 187 187 190 190 190 210 210 210
37243 -246 246 246 253 253 253 253 253 253 182 182 182
37244 - 6 6 6 2 2 6 2 2 6 2 2 6
37245 - 2 2 6 2 2 6 2 2 6 62 62 62
37246 - 74 74 74 34 34 34 14 14 14 0 0 0
37247 - 0 0 0 0 0 0 0 0 0 0 0 0
37248 - 0 0 0 0 0 0 0 0 0 0 0 0
37249 - 0 0 0 0 0 0 0 0 0 0 0 0
37250 - 0 0 0 0 0 0 0 0 0 0 0 0
37251 - 0 0 0 0 0 0 0 0 0 0 0 0
37252 - 0 0 0 0 0 0 0 0 0 0 0 0
37253 - 0 0 0 0 0 0 0 0 0 0 0 0
37254 - 0 0 0 0 0 0 0 0 0 0 0 0
37255 - 0 0 0 0 0 0 0 0 0 0 0 0
37256 - 0 0 0 0 0 0 0 0 0 0 0 0
37257 - 0 0 0 0 0 0 0 0 0 0 0 0
37258 - 0 0 0 10 10 10 22 22 22 54 54 54
37259 - 94 94 94 18 18 18 2 2 6 46 46 46
37260 -234 234 234 221 221 221 190 190 190 190 190 190
37261 -190 190 190 187 187 187 187 187 187 190 190 190
37262 -190 190 190 195 195 195 214 214 214 242 242 242
37263 -253 253 253 253 253 253 253 253 253 253 253 253
37264 - 82 82 82 2 2 6 2 2 6 2 2 6
37265 - 2 2 6 2 2 6 2 2 6 14 14 14
37266 - 86 86 86 54 54 54 22 22 22 6 6 6
37267 - 0 0 0 0 0 0 0 0 0 0 0 0
37268 - 0 0 0 0 0 0 0 0 0 0 0 0
37269 - 0 0 0 0 0 0 0 0 0 0 0 0
37270 - 0 0 0 0 0 0 0 0 0 0 0 0
37271 - 0 0 0 0 0 0 0 0 0 0 0 0
37272 - 0 0 0 0 0 0 0 0 0 0 0 0
37273 - 0 0 0 0 0 0 0 0 0 0 0 0
37274 - 0 0 0 0 0 0 0 0 0 0 0 0
37275 - 0 0 0 0 0 0 0 0 0 0 0 0
37276 - 0 0 0 0 0 0 0 0 0 0 0 0
37277 - 0 0 0 0 0 0 0 0 0 0 0 0
37278 - 6 6 6 18 18 18 46 46 46 90 90 90
37279 - 46 46 46 18 18 18 6 6 6 182 182 182
37280 -253 253 253 246 246 246 206 206 206 190 190 190
37281 -190 190 190 190 190 190 190 190 190 190 190 190
37282 -206 206 206 231 231 231 250 250 250 253 253 253
37283 -253 253 253 253 253 253 253 253 253 253 253 253
37284 -202 202 202 14 14 14 2 2 6 2 2 6
37285 - 2 2 6 2 2 6 2 2 6 2 2 6
37286 - 42 42 42 86 86 86 42 42 42 18 18 18
37287 - 6 6 6 0 0 0 0 0 0 0 0 0
37288 - 0 0 0 0 0 0 0 0 0 0 0 0
37289 - 0 0 0 0 0 0 0 0 0 0 0 0
37290 - 0 0 0 0 0 0 0 0 0 0 0 0
37291 - 0 0 0 0 0 0 0 0 0 0 0 0
37292 - 0 0 0 0 0 0 0 0 0 0 0 0
37293 - 0 0 0 0 0 0 0 0 0 0 0 0
37294 - 0 0 0 0 0 0 0 0 0 0 0 0
37295 - 0 0 0 0 0 0 0 0 0 0 0 0
37296 - 0 0 0 0 0 0 0 0 0 0 0 0
37297 - 0 0 0 0 0 0 0 0 0 6 6 6
37298 - 14 14 14 38 38 38 74 74 74 66 66 66
37299 - 2 2 6 6 6 6 90 90 90 250 250 250
37300 -253 253 253 253 253 253 238 238 238 198 198 198
37301 -190 190 190 190 190 190 195 195 195 221 221 221
37302 -246 246 246 253 253 253 253 253 253 253 253 253
37303 -253 253 253 253 253 253 253 253 253 253 253 253
37304 -253 253 253 82 82 82 2 2 6 2 2 6
37305 - 2 2 6 2 2 6 2 2 6 2 2 6
37306 - 2 2 6 78 78 78 70 70 70 34 34 34
37307 - 14 14 14 6 6 6 0 0 0 0 0 0
37308 - 0 0 0 0 0 0 0 0 0 0 0 0
37309 - 0 0 0 0 0 0 0 0 0 0 0 0
37310 - 0 0 0 0 0 0 0 0 0 0 0 0
37311 - 0 0 0 0 0 0 0 0 0 0 0 0
37312 - 0 0 0 0 0 0 0 0 0 0 0 0
37313 - 0 0 0 0 0 0 0 0 0 0 0 0
37314 - 0 0 0 0 0 0 0 0 0 0 0 0
37315 - 0 0 0 0 0 0 0 0 0 0 0 0
37316 - 0 0 0 0 0 0 0 0 0 0 0 0
37317 - 0 0 0 0 0 0 0 0 0 14 14 14
37318 - 34 34 34 66 66 66 78 78 78 6 6 6
37319 - 2 2 6 18 18 18 218 218 218 253 253 253
37320 -253 253 253 253 253 253 253 253 253 246 246 246
37321 -226 226 226 231 231 231 246 246 246 253 253 253
37322 -253 253 253 253 253 253 253 253 253 253 253 253
37323 -253 253 253 253 253 253 253 253 253 253 253 253
37324 -253 253 253 178 178 178 2 2 6 2 2 6
37325 - 2 2 6 2 2 6 2 2 6 2 2 6
37326 - 2 2 6 18 18 18 90 90 90 62 62 62
37327 - 30 30 30 10 10 10 0 0 0 0 0 0
37328 - 0 0 0 0 0 0 0 0 0 0 0 0
37329 - 0 0 0 0 0 0 0 0 0 0 0 0
37330 - 0 0 0 0 0 0 0 0 0 0 0 0
37331 - 0 0 0 0 0 0 0 0 0 0 0 0
37332 - 0 0 0 0 0 0 0 0 0 0 0 0
37333 - 0 0 0 0 0 0 0 0 0 0 0 0
37334 - 0 0 0 0 0 0 0 0 0 0 0 0
37335 - 0 0 0 0 0 0 0 0 0 0 0 0
37336 - 0 0 0 0 0 0 0 0 0 0 0 0
37337 - 0 0 0 0 0 0 10 10 10 26 26 26
37338 - 58 58 58 90 90 90 18 18 18 2 2 6
37339 - 2 2 6 110 110 110 253 253 253 253 253 253
37340 -253 253 253 253 253 253 253 253 253 253 253 253
37341 -250 250 250 253 253 253 253 253 253 253 253 253
37342 -253 253 253 253 253 253 253 253 253 253 253 253
37343 -253 253 253 253 253 253 253 253 253 253 253 253
37344 -253 253 253 231 231 231 18 18 18 2 2 6
37345 - 2 2 6 2 2 6 2 2 6 2 2 6
37346 - 2 2 6 2 2 6 18 18 18 94 94 94
37347 - 54 54 54 26 26 26 10 10 10 0 0 0
37348 - 0 0 0 0 0 0 0 0 0 0 0 0
37349 - 0 0 0 0 0 0 0 0 0 0 0 0
37350 - 0 0 0 0 0 0 0 0 0 0 0 0
37351 - 0 0 0 0 0 0 0 0 0 0 0 0
37352 - 0 0 0 0 0 0 0 0 0 0 0 0
37353 - 0 0 0 0 0 0 0 0 0 0 0 0
37354 - 0 0 0 0 0 0 0 0 0 0 0 0
37355 - 0 0 0 0 0 0 0 0 0 0 0 0
37356 - 0 0 0 0 0 0 0 0 0 0 0 0
37357 - 0 0 0 6 6 6 22 22 22 50 50 50
37358 - 90 90 90 26 26 26 2 2 6 2 2 6
37359 - 14 14 14 195 195 195 250 250 250 253 253 253
37360 -253 253 253 253 253 253 253 253 253 253 253 253
37361 -253 253 253 253 253 253 253 253 253 253 253 253
37362 -253 253 253 253 253 253 253 253 253 253 253 253
37363 -253 253 253 253 253 253 253 253 253 253 253 253
37364 -250 250 250 242 242 242 54 54 54 2 2 6
37365 - 2 2 6 2 2 6 2 2 6 2 2 6
37366 - 2 2 6 2 2 6 2 2 6 38 38 38
37367 - 86 86 86 50 50 50 22 22 22 6 6 6
37368 - 0 0 0 0 0 0 0 0 0 0 0 0
37369 - 0 0 0 0 0 0 0 0 0 0 0 0
37370 - 0 0 0 0 0 0 0 0 0 0 0 0
37371 - 0 0 0 0 0 0 0 0 0 0 0 0
37372 - 0 0 0 0 0 0 0 0 0 0 0 0
37373 - 0 0 0 0 0 0 0 0 0 0 0 0
37374 - 0 0 0 0 0 0 0 0 0 0 0 0
37375 - 0 0 0 0 0 0 0 0 0 0 0 0
37376 - 0 0 0 0 0 0 0 0 0 0 0 0
37377 - 6 6 6 14 14 14 38 38 38 82 82 82
37378 - 34 34 34 2 2 6 2 2 6 2 2 6
37379 - 42 42 42 195 195 195 246 246 246 253 253 253
37380 -253 253 253 253 253 253 253 253 253 250 250 250
37381 -242 242 242 242 242 242 250 250 250 253 253 253
37382 -253 253 253 253 253 253 253 253 253 253 253 253
37383 -253 253 253 250 250 250 246 246 246 238 238 238
37384 -226 226 226 231 231 231 101 101 101 6 6 6
37385 - 2 2 6 2 2 6 2 2 6 2 2 6
37386 - 2 2 6 2 2 6 2 2 6 2 2 6
37387 - 38 38 38 82 82 82 42 42 42 14 14 14
37388 - 6 6 6 0 0 0 0 0 0 0 0 0
37389 - 0 0 0 0 0 0 0 0 0 0 0 0
37390 - 0 0 0 0 0 0 0 0 0 0 0 0
37391 - 0 0 0 0 0 0 0 0 0 0 0 0
37392 - 0 0 0 0 0 0 0 0 0 0 0 0
37393 - 0 0 0 0 0 0 0 0 0 0 0 0
37394 - 0 0 0 0 0 0 0 0 0 0 0 0
37395 - 0 0 0 0 0 0 0 0 0 0 0 0
37396 - 0 0 0 0 0 0 0 0 0 0 0 0
37397 - 10 10 10 26 26 26 62 62 62 66 66 66
37398 - 2 2 6 2 2 6 2 2 6 6 6 6
37399 - 70 70 70 170 170 170 206 206 206 234 234 234
37400 -246 246 246 250 250 250 250 250 250 238 238 238
37401 -226 226 226 231 231 231 238 238 238 250 250 250
37402 -250 250 250 250 250 250 246 246 246 231 231 231
37403 -214 214 214 206 206 206 202 202 202 202 202 202
37404 -198 198 198 202 202 202 182 182 182 18 18 18
37405 - 2 2 6 2 2 6 2 2 6 2 2 6
37406 - 2 2 6 2 2 6 2 2 6 2 2 6
37407 - 2 2 6 62 62 62 66 66 66 30 30 30
37408 - 10 10 10 0 0 0 0 0 0 0 0 0
37409 - 0 0 0 0 0 0 0 0 0 0 0 0
37410 - 0 0 0 0 0 0 0 0 0 0 0 0
37411 - 0 0 0 0 0 0 0 0 0 0 0 0
37412 - 0 0 0 0 0 0 0 0 0 0 0 0
37413 - 0 0 0 0 0 0 0 0 0 0 0 0
37414 - 0 0 0 0 0 0 0 0 0 0 0 0
37415 - 0 0 0 0 0 0 0 0 0 0 0 0
37416 - 0 0 0 0 0 0 0 0 0 0 0 0
37417 - 14 14 14 42 42 42 82 82 82 18 18 18
37418 - 2 2 6 2 2 6 2 2 6 10 10 10
37419 - 94 94 94 182 182 182 218 218 218 242 242 242
37420 -250 250 250 253 253 253 253 253 253 250 250 250
37421 -234 234 234 253 253 253 253 253 253 253 253 253
37422 -253 253 253 253 253 253 253 253 253 246 246 246
37423 -238 238 238 226 226 226 210 210 210 202 202 202
37424 -195 195 195 195 195 195 210 210 210 158 158 158
37425 - 6 6 6 14 14 14 50 50 50 14 14 14
37426 - 2 2 6 2 2 6 2 2 6 2 2 6
37427 - 2 2 6 6 6 6 86 86 86 46 46 46
37428 - 18 18 18 6 6 6 0 0 0 0 0 0
37429 - 0 0 0 0 0 0 0 0 0 0 0 0
37430 - 0 0 0 0 0 0 0 0 0 0 0 0
37431 - 0 0 0 0 0 0 0 0 0 0 0 0
37432 - 0 0 0 0 0 0 0 0 0 0 0 0
37433 - 0 0 0 0 0 0 0 0 0 0 0 0
37434 - 0 0 0 0 0 0 0 0 0 0 0 0
37435 - 0 0 0 0 0 0 0 0 0 0 0 0
37436 - 0 0 0 0 0 0 0 0 0 6 6 6
37437 - 22 22 22 54 54 54 70 70 70 2 2 6
37438 - 2 2 6 10 10 10 2 2 6 22 22 22
37439 -166 166 166 231 231 231 250 250 250 253 253 253
37440 -253 253 253 253 253 253 253 253 253 250 250 250
37441 -242 242 242 253 253 253 253 253 253 253 253 253
37442 -253 253 253 253 253 253 253 253 253 253 253 253
37443 -253 253 253 253 253 253 253 253 253 246 246 246
37444 -231 231 231 206 206 206 198 198 198 226 226 226
37445 - 94 94 94 2 2 6 6 6 6 38 38 38
37446 - 30 30 30 2 2 6 2 2 6 2 2 6
37447 - 2 2 6 2 2 6 62 62 62 66 66 66
37448 - 26 26 26 10 10 10 0 0 0 0 0 0
37449 - 0 0 0 0 0 0 0 0 0 0 0 0
37450 - 0 0 0 0 0 0 0 0 0 0 0 0
37451 - 0 0 0 0 0 0 0 0 0 0 0 0
37452 - 0 0 0 0 0 0 0 0 0 0 0 0
37453 - 0 0 0 0 0 0 0 0 0 0 0 0
37454 - 0 0 0 0 0 0 0 0 0 0 0 0
37455 - 0 0 0 0 0 0 0 0 0 0 0 0
37456 - 0 0 0 0 0 0 0 0 0 10 10 10
37457 - 30 30 30 74 74 74 50 50 50 2 2 6
37458 - 26 26 26 26 26 26 2 2 6 106 106 106
37459 -238 238 238 253 253 253 253 253 253 253 253 253
37460 -253 253 253 253 253 253 253 253 253 253 253 253
37461 -253 253 253 253 253 253 253 253 253 253 253 253
37462 -253 253 253 253 253 253 253 253 253 253 253 253
37463 -253 253 253 253 253 253 253 253 253 253 253 253
37464 -253 253 253 246 246 246 218 218 218 202 202 202
37465 -210 210 210 14 14 14 2 2 6 2 2 6
37466 - 30 30 30 22 22 22 2 2 6 2 2 6
37467 - 2 2 6 2 2 6 18 18 18 86 86 86
37468 - 42 42 42 14 14 14 0 0 0 0 0 0
37469 - 0 0 0 0 0 0 0 0 0 0 0 0
37470 - 0 0 0 0 0 0 0 0 0 0 0 0
37471 - 0 0 0 0 0 0 0 0 0 0 0 0
37472 - 0 0 0 0 0 0 0 0 0 0 0 0
37473 - 0 0 0 0 0 0 0 0 0 0 0 0
37474 - 0 0 0 0 0 0 0 0 0 0 0 0
37475 - 0 0 0 0 0 0 0 0 0 0 0 0
37476 - 0 0 0 0 0 0 0 0 0 14 14 14
37477 - 42 42 42 90 90 90 22 22 22 2 2 6
37478 - 42 42 42 2 2 6 18 18 18 218 218 218
37479 -253 253 253 253 253 253 253 253 253 253 253 253
37480 -253 253 253 253 253 253 253 253 253 253 253 253
37481 -253 253 253 253 253 253 253 253 253 253 253 253
37482 -253 253 253 253 253 253 253 253 253 253 253 253
37483 -253 253 253 253 253 253 253 253 253 253 253 253
37484 -253 253 253 253 253 253 250 250 250 221 221 221
37485 -218 218 218 101 101 101 2 2 6 14 14 14
37486 - 18 18 18 38 38 38 10 10 10 2 2 6
37487 - 2 2 6 2 2 6 2 2 6 78 78 78
37488 - 58 58 58 22 22 22 6 6 6 0 0 0
37489 - 0 0 0 0 0 0 0 0 0 0 0 0
37490 - 0 0 0 0 0 0 0 0 0 0 0 0
37491 - 0 0 0 0 0 0 0 0 0 0 0 0
37492 - 0 0 0 0 0 0 0 0 0 0 0 0
37493 - 0 0 0 0 0 0 0 0 0 0 0 0
37494 - 0 0 0 0 0 0 0 0 0 0 0 0
37495 - 0 0 0 0 0 0 0 0 0 0 0 0
37496 - 0 0 0 0 0 0 6 6 6 18 18 18
37497 - 54 54 54 82 82 82 2 2 6 26 26 26
37498 - 22 22 22 2 2 6 123 123 123 253 253 253
37499 -253 253 253 253 253 253 253 253 253 253 253 253
37500 -253 253 253 253 253 253 253 253 253 253 253 253
37501 -253 253 253 253 253 253 253 253 253 253 253 253
37502 -253 253 253 253 253 253 253 253 253 253 253 253
37503 -253 253 253 253 253 253 253 253 253 253 253 253
37504 -253 253 253 253 253 253 253 253 253 250 250 250
37505 -238 238 238 198 198 198 6 6 6 38 38 38
37506 - 58 58 58 26 26 26 38 38 38 2 2 6
37507 - 2 2 6 2 2 6 2 2 6 46 46 46
37508 - 78 78 78 30 30 30 10 10 10 0 0 0
37509 - 0 0 0 0 0 0 0 0 0 0 0 0
37510 - 0 0 0 0 0 0 0 0 0 0 0 0
37511 - 0 0 0 0 0 0 0 0 0 0 0 0
37512 - 0 0 0 0 0 0 0 0 0 0 0 0
37513 - 0 0 0 0 0 0 0 0 0 0 0 0
37514 - 0 0 0 0 0 0 0 0 0 0 0 0
37515 - 0 0 0 0 0 0 0 0 0 0 0 0
37516 - 0 0 0 0 0 0 10 10 10 30 30 30
37517 - 74 74 74 58 58 58 2 2 6 42 42 42
37518 - 2 2 6 22 22 22 231 231 231 253 253 253
37519 -253 253 253 253 253 253 253 253 253 253 253 253
37520 -253 253 253 253 253 253 253 253 253 250 250 250
37521 -253 253 253 253 253 253 253 253 253 253 253 253
37522 -253 253 253 253 253 253 253 253 253 253 253 253
37523 -253 253 253 253 253 253 253 253 253 253 253 253
37524 -253 253 253 253 253 253 253 253 253 253 253 253
37525 -253 253 253 246 246 246 46 46 46 38 38 38
37526 - 42 42 42 14 14 14 38 38 38 14 14 14
37527 - 2 2 6 2 2 6 2 2 6 6 6 6
37528 - 86 86 86 46 46 46 14 14 14 0 0 0
37529 - 0 0 0 0 0 0 0 0 0 0 0 0
37530 - 0 0 0 0 0 0 0 0 0 0 0 0
37531 - 0 0 0 0 0 0 0 0 0 0 0 0
37532 - 0 0 0 0 0 0 0 0 0 0 0 0
37533 - 0 0 0 0 0 0 0 0 0 0 0 0
37534 - 0 0 0 0 0 0 0 0 0 0 0 0
37535 - 0 0 0 0 0 0 0 0 0 0 0 0
37536 - 0 0 0 6 6 6 14 14 14 42 42 42
37537 - 90 90 90 18 18 18 18 18 18 26 26 26
37538 - 2 2 6 116 116 116 253 253 253 253 253 253
37539 -253 253 253 253 253 253 253 253 253 253 253 253
37540 -253 253 253 253 253 253 250 250 250 238 238 238
37541 -253 253 253 253 253 253 253 253 253 253 253 253
37542 -253 253 253 253 253 253 253 253 253 253 253 253
37543 -253 253 253 253 253 253 253 253 253 253 253 253
37544 -253 253 253 253 253 253 253 253 253 253 253 253
37545 -253 253 253 253 253 253 94 94 94 6 6 6
37546 - 2 2 6 2 2 6 10 10 10 34 34 34
37547 - 2 2 6 2 2 6 2 2 6 2 2 6
37548 - 74 74 74 58 58 58 22 22 22 6 6 6
37549 - 0 0 0 0 0 0 0 0 0 0 0 0
37550 - 0 0 0 0 0 0 0 0 0 0 0 0
37551 - 0 0 0 0 0 0 0 0 0 0 0 0
37552 - 0 0 0 0 0 0 0 0 0 0 0 0
37553 - 0 0 0 0 0 0 0 0 0 0 0 0
37554 - 0 0 0 0 0 0 0 0 0 0 0 0
37555 - 0 0 0 0 0 0 0 0 0 0 0 0
37556 - 0 0 0 10 10 10 26 26 26 66 66 66
37557 - 82 82 82 2 2 6 38 38 38 6 6 6
37558 - 14 14 14 210 210 210 253 253 253 253 253 253
37559 -253 253 253 253 253 253 253 253 253 253 253 253
37560 -253 253 253 253 253 253 246 246 246 242 242 242
37561 -253 253 253 253 253 253 253 253 253 253 253 253
37562 -253 253 253 253 253 253 253 253 253 253 253 253
37563 -253 253 253 253 253 253 253 253 253 253 253 253
37564 -253 253 253 253 253 253 253 253 253 253 253 253
37565 -253 253 253 253 253 253 144 144 144 2 2 6
37566 - 2 2 6 2 2 6 2 2 6 46 46 46
37567 - 2 2 6 2 2 6 2 2 6 2 2 6
37568 - 42 42 42 74 74 74 30 30 30 10 10 10
37569 - 0 0 0 0 0 0 0 0 0 0 0 0
37570 - 0 0 0 0 0 0 0 0 0 0 0 0
37571 - 0 0 0 0 0 0 0 0 0 0 0 0
37572 - 0 0 0 0 0 0 0 0 0 0 0 0
37573 - 0 0 0 0 0 0 0 0 0 0 0 0
37574 - 0 0 0 0 0 0 0 0 0 0 0 0
37575 - 0 0 0 0 0 0 0 0 0 0 0 0
37576 - 6 6 6 14 14 14 42 42 42 90 90 90
37577 - 26 26 26 6 6 6 42 42 42 2 2 6
37578 - 74 74 74 250 250 250 253 253 253 253 253 253
37579 -253 253 253 253 253 253 253 253 253 253 253 253
37580 -253 253 253 253 253 253 242 242 242 242 242 242
37581 -253 253 253 253 253 253 253 253 253 253 253 253
37582 -253 253 253 253 253 253 253 253 253 253 253 253
37583 -253 253 253 253 253 253 253 253 253 253 253 253
37584 -253 253 253 253 253 253 253 253 253 253 253 253
37585 -253 253 253 253 253 253 182 182 182 2 2 6
37586 - 2 2 6 2 2 6 2 2 6 46 46 46
37587 - 2 2 6 2 2 6 2 2 6 2 2 6
37588 - 10 10 10 86 86 86 38 38 38 10 10 10
37589 - 0 0 0 0 0 0 0 0 0 0 0 0
37590 - 0 0 0 0 0 0 0 0 0 0 0 0
37591 - 0 0 0 0 0 0 0 0 0 0 0 0
37592 - 0 0 0 0 0 0 0 0 0 0 0 0
37593 - 0 0 0 0 0 0 0 0 0 0 0 0
37594 - 0 0 0 0 0 0 0 0 0 0 0 0
37595 - 0 0 0 0 0 0 0 0 0 0 0 0
37596 - 10 10 10 26 26 26 66 66 66 82 82 82
37597 - 2 2 6 22 22 22 18 18 18 2 2 6
37598 -149 149 149 253 253 253 253 253 253 253 253 253
37599 -253 253 253 253 253 253 253 253 253 253 253 253
37600 -253 253 253 253 253 253 234 234 234 242 242 242
37601 -253 253 253 253 253 253 253 253 253 253 253 253
37602 -253 253 253 253 253 253 253 253 253 253 253 253
37603 -253 253 253 253 253 253 253 253 253 253 253 253
37604 -253 253 253 253 253 253 253 253 253 253 253 253
37605 -253 253 253 253 253 253 206 206 206 2 2 6
37606 - 2 2 6 2 2 6 2 2 6 38 38 38
37607 - 2 2 6 2 2 6 2 2 6 2 2 6
37608 - 6 6 6 86 86 86 46 46 46 14 14 14
37609 - 0 0 0 0 0 0 0 0 0 0 0 0
37610 - 0 0 0 0 0 0 0 0 0 0 0 0
37611 - 0 0 0 0 0 0 0 0 0 0 0 0
37612 - 0 0 0 0 0 0 0 0 0 0 0 0
37613 - 0 0 0 0 0 0 0 0 0 0 0 0
37614 - 0 0 0 0 0 0 0 0 0 0 0 0
37615 - 0 0 0 0 0 0 0 0 0 6 6 6
37616 - 18 18 18 46 46 46 86 86 86 18 18 18
37617 - 2 2 6 34 34 34 10 10 10 6 6 6
37618 -210 210 210 253 253 253 253 253 253 253 253 253
37619 -253 253 253 253 253 253 253 253 253 253 253 253
37620 -253 253 253 253 253 253 234 234 234 242 242 242
37621 -253 253 253 253 253 253 253 253 253 253 253 253
37622 -253 253 253 253 253 253 253 253 253 253 253 253
37623 -253 253 253 253 253 253 253 253 253 253 253 253
37624 -253 253 253 253 253 253 253 253 253 253 253 253
37625 -253 253 253 253 253 253 221 221 221 6 6 6
37626 - 2 2 6 2 2 6 6 6 6 30 30 30
37627 - 2 2 6 2 2 6 2 2 6 2 2 6
37628 - 2 2 6 82 82 82 54 54 54 18 18 18
37629 - 6 6 6 0 0 0 0 0 0 0 0 0
37630 - 0 0 0 0 0 0 0 0 0 0 0 0
37631 - 0 0 0 0 0 0 0 0 0 0 0 0
37632 - 0 0 0 0 0 0 0 0 0 0 0 0
37633 - 0 0 0 0 0 0 0 0 0 0 0 0
37634 - 0 0 0 0 0 0 0 0 0 0 0 0
37635 - 0 0 0 0 0 0 0 0 0 10 10 10
37636 - 26 26 26 66 66 66 62 62 62 2 2 6
37637 - 2 2 6 38 38 38 10 10 10 26 26 26
37638 -238 238 238 253 253 253 253 253 253 253 253 253
37639 -253 253 253 253 253 253 253 253 253 253 253 253
37640 -253 253 253 253 253 253 231 231 231 238 238 238
37641 -253 253 253 253 253 253 253 253 253 253 253 253
37642 -253 253 253 253 253 253 253 253 253 253 253 253
37643 -253 253 253 253 253 253 253 253 253 253 253 253
37644 -253 253 253 253 253 253 253 253 253 253 253 253
37645 -253 253 253 253 253 253 231 231 231 6 6 6
37646 - 2 2 6 2 2 6 10 10 10 30 30 30
37647 - 2 2 6 2 2 6 2 2 6 2 2 6
37648 - 2 2 6 66 66 66 58 58 58 22 22 22
37649 - 6 6 6 0 0 0 0 0 0 0 0 0
37650 - 0 0 0 0 0 0 0 0 0 0 0 0
37651 - 0 0 0 0 0 0 0 0 0 0 0 0
37652 - 0 0 0 0 0 0 0 0 0 0 0 0
37653 - 0 0 0 0 0 0 0 0 0 0 0 0
37654 - 0 0 0 0 0 0 0 0 0 0 0 0
37655 - 0 0 0 0 0 0 0 0 0 10 10 10
37656 - 38 38 38 78 78 78 6 6 6 2 2 6
37657 - 2 2 6 46 46 46 14 14 14 42 42 42
37658 -246 246 246 253 253 253 253 253 253 253 253 253
37659 -253 253 253 253 253 253 253 253 253 253 253 253
37660 -253 253 253 253 253 253 231 231 231 242 242 242
37661 -253 253 253 253 253 253 253 253 253 253 253 253
37662 -253 253 253 253 253 253 253 253 253 253 253 253
37663 -253 253 253 253 253 253 253 253 253 253 253 253
37664 -253 253 253 253 253 253 253 253 253 253 253 253
37665 -253 253 253 253 253 253 234 234 234 10 10 10
37666 - 2 2 6 2 2 6 22 22 22 14 14 14
37667 - 2 2 6 2 2 6 2 2 6 2 2 6
37668 - 2 2 6 66 66 66 62 62 62 22 22 22
37669 - 6 6 6 0 0 0 0 0 0 0 0 0
37670 - 0 0 0 0 0 0 0 0 0 0 0 0
37671 - 0 0 0 0 0 0 0 0 0 0 0 0
37672 - 0 0 0 0 0 0 0 0 0 0 0 0
37673 - 0 0 0 0 0 0 0 0 0 0 0 0
37674 - 0 0 0 0 0 0 0 0 0 0 0 0
37675 - 0 0 0 0 0 0 6 6 6 18 18 18
37676 - 50 50 50 74 74 74 2 2 6 2 2 6
37677 - 14 14 14 70 70 70 34 34 34 62 62 62
37678 -250 250 250 253 253 253 253 253 253 253 253 253
37679 -253 253 253 253 253 253 253 253 253 253 253 253
37680 -253 253 253 253 253 253 231 231 231 246 246 246
37681 -253 253 253 253 253 253 253 253 253 253 253 253
37682 -253 253 253 253 253 253 253 253 253 253 253 253
37683 -253 253 253 253 253 253 253 253 253 253 253 253
37684 -253 253 253 253 253 253 253 253 253 253 253 253
37685 -253 253 253 253 253 253 234 234 234 14 14 14
37686 - 2 2 6 2 2 6 30 30 30 2 2 6
37687 - 2 2 6 2 2 6 2 2 6 2 2 6
37688 - 2 2 6 66 66 66 62 62 62 22 22 22
37689 - 6 6 6 0 0 0 0 0 0 0 0 0
37690 - 0 0 0 0 0 0 0 0 0 0 0 0
37691 - 0 0 0 0 0 0 0 0 0 0 0 0
37692 - 0 0 0 0 0 0 0 0 0 0 0 0
37693 - 0 0 0 0 0 0 0 0 0 0 0 0
37694 - 0 0 0 0 0 0 0 0 0 0 0 0
37695 - 0 0 0 0 0 0 6 6 6 18 18 18
37696 - 54 54 54 62 62 62 2 2 6 2 2 6
37697 - 2 2 6 30 30 30 46 46 46 70 70 70
37698 -250 250 250 253 253 253 253 253 253 253 253 253
37699 -253 253 253 253 253 253 253 253 253 253 253 253
37700 -253 253 253 253 253 253 231 231 231 246 246 246
37701 -253 253 253 253 253 253 253 253 253 253 253 253
37702 -253 253 253 253 253 253 253 253 253 253 253 253
37703 -253 253 253 253 253 253 253 253 253 253 253 253
37704 -253 253 253 253 253 253 253 253 253 253 253 253
37705 -253 253 253 253 253 253 226 226 226 10 10 10
37706 - 2 2 6 6 6 6 30 30 30 2 2 6
37707 - 2 2 6 2 2 6 2 2 6 2 2 6
37708 - 2 2 6 66 66 66 58 58 58 22 22 22
37709 - 6 6 6 0 0 0 0 0 0 0 0 0
37710 - 0 0 0 0 0 0 0 0 0 0 0 0
37711 - 0 0 0 0 0 0 0 0 0 0 0 0
37712 - 0 0 0 0 0 0 0 0 0 0 0 0
37713 - 0 0 0 0 0 0 0 0 0 0 0 0
37714 - 0 0 0 0 0 0 0 0 0 0 0 0
37715 - 0 0 0 0 0 0 6 6 6 22 22 22
37716 - 58 58 58 62 62 62 2 2 6 2 2 6
37717 - 2 2 6 2 2 6 30 30 30 78 78 78
37718 -250 250 250 253 253 253 253 253 253 253 253 253
37719 -253 253 253 253 253 253 253 253 253 253 253 253
37720 -253 253 253 253 253 253 231 231 231 246 246 246
37721 -253 253 253 253 253 253 253 253 253 253 253 253
37722 -253 253 253 253 253 253 253 253 253 253 253 253
37723 -253 253 253 253 253 253 253 253 253 253 253 253
37724 -253 253 253 253 253 253 253 253 253 253 253 253
37725 -253 253 253 253 253 253 206 206 206 2 2 6
37726 - 22 22 22 34 34 34 18 14 6 22 22 22
37727 - 26 26 26 18 18 18 6 6 6 2 2 6
37728 - 2 2 6 82 82 82 54 54 54 18 18 18
37729 - 6 6 6 0 0 0 0 0 0 0 0 0
37730 - 0 0 0 0 0 0 0 0 0 0 0 0
37731 - 0 0 0 0 0 0 0 0 0 0 0 0
37732 - 0 0 0 0 0 0 0 0 0 0 0 0
37733 - 0 0 0 0 0 0 0 0 0 0 0 0
37734 - 0 0 0 0 0 0 0 0 0 0 0 0
37735 - 0 0 0 0 0 0 6 6 6 26 26 26
37736 - 62 62 62 106 106 106 74 54 14 185 133 11
37737 -210 162 10 121 92 8 6 6 6 62 62 62
37738 -238 238 238 253 253 253 253 253 253 253 253 253
37739 -253 253 253 253 253 253 253 253 253 253 253 253
37740 -253 253 253 253 253 253 231 231 231 246 246 246
37741 -253 253 253 253 253 253 253 253 253 253 253 253
37742 -253 253 253 253 253 253 253 253 253 253 253 253
37743 -253 253 253 253 253 253 253 253 253 253 253 253
37744 -253 253 253 253 253 253 253 253 253 253 253 253
37745 -253 253 253 253 253 253 158 158 158 18 18 18
37746 - 14 14 14 2 2 6 2 2 6 2 2 6
37747 - 6 6 6 18 18 18 66 66 66 38 38 38
37748 - 6 6 6 94 94 94 50 50 50 18 18 18
37749 - 6 6 6 0 0 0 0 0 0 0 0 0
37750 - 0 0 0 0 0 0 0 0 0 0 0 0
37751 - 0 0 0 0 0 0 0 0 0 0 0 0
37752 - 0 0 0 0 0 0 0 0 0 0 0 0
37753 - 0 0 0 0 0 0 0 0 0 0 0 0
37754 - 0 0 0 0 0 0 0 0 0 6 6 6
37755 - 10 10 10 10 10 10 18 18 18 38 38 38
37756 - 78 78 78 142 134 106 216 158 10 242 186 14
37757 -246 190 14 246 190 14 156 118 10 10 10 10
37758 - 90 90 90 238 238 238 253 253 253 253 253 253
37759 -253 253 253 253 253 253 253 253 253 253 253 253
37760 -253 253 253 253 253 253 231 231 231 250 250 250
37761 -253 253 253 253 253 253 253 253 253 253 253 253
37762 -253 253 253 253 253 253 253 253 253 253 253 253
37763 -253 253 253 253 253 253 253 253 253 253 253 253
37764 -253 253 253 253 253 253 253 253 253 246 230 190
37765 -238 204 91 238 204 91 181 142 44 37 26 9
37766 - 2 2 6 2 2 6 2 2 6 2 2 6
37767 - 2 2 6 2 2 6 38 38 38 46 46 46
37768 - 26 26 26 106 106 106 54 54 54 18 18 18
37769 - 6 6 6 0 0 0 0 0 0 0 0 0
37770 - 0 0 0 0 0 0 0 0 0 0 0 0
37771 - 0 0 0 0 0 0 0 0 0 0 0 0
37772 - 0 0 0 0 0 0 0 0 0 0 0 0
37773 - 0 0 0 0 0 0 0 0 0 0 0 0
37774 - 0 0 0 6 6 6 14 14 14 22 22 22
37775 - 30 30 30 38 38 38 50 50 50 70 70 70
37776 -106 106 106 190 142 34 226 170 11 242 186 14
37777 -246 190 14 246 190 14 246 190 14 154 114 10
37778 - 6 6 6 74 74 74 226 226 226 253 253 253
37779 -253 253 253 253 253 253 253 253 253 253 253 253
37780 -253 253 253 253 253 253 231 231 231 250 250 250
37781 -253 253 253 253 253 253 253 253 253 253 253 253
37782 -253 253 253 253 253 253 253 253 253 253 253 253
37783 -253 253 253 253 253 253 253 253 253 253 253 253
37784 -253 253 253 253 253 253 253 253 253 228 184 62
37785 -241 196 14 241 208 19 232 195 16 38 30 10
37786 - 2 2 6 2 2 6 2 2 6 2 2 6
37787 - 2 2 6 6 6 6 30 30 30 26 26 26
37788 -203 166 17 154 142 90 66 66 66 26 26 26
37789 - 6 6 6 0 0 0 0 0 0 0 0 0
37790 - 0 0 0 0 0 0 0 0 0 0 0 0
37791 - 0 0 0 0 0 0 0 0 0 0 0 0
37792 - 0 0 0 0 0 0 0 0 0 0 0 0
37793 - 0 0 0 0 0 0 0 0 0 0 0 0
37794 - 6 6 6 18 18 18 38 38 38 58 58 58
37795 - 78 78 78 86 86 86 101 101 101 123 123 123
37796 -175 146 61 210 150 10 234 174 13 246 186 14
37797 -246 190 14 246 190 14 246 190 14 238 190 10
37798 -102 78 10 2 2 6 46 46 46 198 198 198
37799 -253 253 253 253 253 253 253 253 253 253 253 253
37800 -253 253 253 253 253 253 234 234 234 242 242 242
37801 -253 253 253 253 253 253 253 253 253 253 253 253
37802 -253 253 253 253 253 253 253 253 253 253 253 253
37803 -253 253 253 253 253 253 253 253 253 253 253 253
37804 -253 253 253 253 253 253 253 253 253 224 178 62
37805 -242 186 14 241 196 14 210 166 10 22 18 6
37806 - 2 2 6 2 2 6 2 2 6 2 2 6
37807 - 2 2 6 2 2 6 6 6 6 121 92 8
37808 -238 202 15 232 195 16 82 82 82 34 34 34
37809 - 10 10 10 0 0 0 0 0 0 0 0 0
37810 - 0 0 0 0 0 0 0 0 0 0 0 0
37811 - 0 0 0 0 0 0 0 0 0 0 0 0
37812 - 0 0 0 0 0 0 0 0 0 0 0 0
37813 - 0 0 0 0 0 0 0 0 0 0 0 0
37814 - 14 14 14 38 38 38 70 70 70 154 122 46
37815 -190 142 34 200 144 11 197 138 11 197 138 11
37816 -213 154 11 226 170 11 242 186 14 246 190 14
37817 -246 190 14 246 190 14 246 190 14 246 190 14
37818 -225 175 15 46 32 6 2 2 6 22 22 22
37819 -158 158 158 250 250 250 253 253 253 253 253 253
37820 -253 253 253 253 253 253 253 253 253 253 253 253
37821 -253 253 253 253 253 253 253 253 253 253 253 253
37822 -253 253 253 253 253 253 253 253 253 253 253 253
37823 -253 253 253 253 253 253 253 253 253 253 253 253
37824 -253 253 253 250 250 250 242 242 242 224 178 62
37825 -239 182 13 236 186 11 213 154 11 46 32 6
37826 - 2 2 6 2 2 6 2 2 6 2 2 6
37827 - 2 2 6 2 2 6 61 42 6 225 175 15
37828 -238 190 10 236 186 11 112 100 78 42 42 42
37829 - 14 14 14 0 0 0 0 0 0 0 0 0
37830 - 0 0 0 0 0 0 0 0 0 0 0 0
37831 - 0 0 0 0 0 0 0 0 0 0 0 0
37832 - 0 0 0 0 0 0 0 0 0 0 0 0
37833 - 0 0 0 0 0 0 0 0 0 6 6 6
37834 - 22 22 22 54 54 54 154 122 46 213 154 11
37835 -226 170 11 230 174 11 226 170 11 226 170 11
37836 -236 178 12 242 186 14 246 190 14 246 190 14
37837 -246 190 14 246 190 14 246 190 14 246 190 14
37838 -241 196 14 184 144 12 10 10 10 2 2 6
37839 - 6 6 6 116 116 116 242 242 242 253 253 253
37840 -253 253 253 253 253 253 253 253 253 253 253 253
37841 -253 253 253 253 253 253 253 253 253 253 253 253
37842 -253 253 253 253 253 253 253 253 253 253 253 253
37843 -253 253 253 253 253 253 253 253 253 253 253 253
37844 -253 253 253 231 231 231 198 198 198 214 170 54
37845 -236 178 12 236 178 12 210 150 10 137 92 6
37846 - 18 14 6 2 2 6 2 2 6 2 2 6
37847 - 6 6 6 70 47 6 200 144 11 236 178 12
37848 -239 182 13 239 182 13 124 112 88 58 58 58
37849 - 22 22 22 6 6 6 0 0 0 0 0 0
37850 - 0 0 0 0 0 0 0 0 0 0 0 0
37851 - 0 0 0 0 0 0 0 0 0 0 0 0
37852 - 0 0 0 0 0 0 0 0 0 0 0 0
37853 - 0 0 0 0 0 0 0 0 0 10 10 10
37854 - 30 30 30 70 70 70 180 133 36 226 170 11
37855 -239 182 13 242 186 14 242 186 14 246 186 14
37856 -246 190 14 246 190 14 246 190 14 246 190 14
37857 -246 190 14 246 190 14 246 190 14 246 190 14
37858 -246 190 14 232 195 16 98 70 6 2 2 6
37859 - 2 2 6 2 2 6 66 66 66 221 221 221
37860 -253 253 253 253 253 253 253 253 253 253 253 253
37861 -253 253 253 253 253 253 253 253 253 253 253 253
37862 -253 253 253 253 253 253 253 253 253 253 253 253
37863 -253 253 253 253 253 253 253 253 253 253 253 253
37864 -253 253 253 206 206 206 198 198 198 214 166 58
37865 -230 174 11 230 174 11 216 158 10 192 133 9
37866 -163 110 8 116 81 8 102 78 10 116 81 8
37867 -167 114 7 197 138 11 226 170 11 239 182 13
37868 -242 186 14 242 186 14 162 146 94 78 78 78
37869 - 34 34 34 14 14 14 6 6 6 0 0 0
37870 - 0 0 0 0 0 0 0 0 0 0 0 0
37871 - 0 0 0 0 0 0 0 0 0 0 0 0
37872 - 0 0 0 0 0 0 0 0 0 0 0 0
37873 - 0 0 0 0 0 0 0 0 0 6 6 6
37874 - 30 30 30 78 78 78 190 142 34 226 170 11
37875 -239 182 13 246 190 14 246 190 14 246 190 14
37876 -246 190 14 246 190 14 246 190 14 246 190 14
37877 -246 190 14 246 190 14 246 190 14 246 190 14
37878 -246 190 14 241 196 14 203 166 17 22 18 6
37879 - 2 2 6 2 2 6 2 2 6 38 38 38
37880 -218 218 218 253 253 253 253 253 253 253 253 253
37881 -253 253 253 253 253 253 253 253 253 253 253 253
37882 -253 253 253 253 253 253 253 253 253 253 253 253
37883 -253 253 253 253 253 253 253 253 253 253 253 253
37884 -250 250 250 206 206 206 198 198 198 202 162 69
37885 -226 170 11 236 178 12 224 166 10 210 150 10
37886 -200 144 11 197 138 11 192 133 9 197 138 11
37887 -210 150 10 226 170 11 242 186 14 246 190 14
37888 -246 190 14 246 186 14 225 175 15 124 112 88
37889 - 62 62 62 30 30 30 14 14 14 6 6 6
37890 - 0 0 0 0 0 0 0 0 0 0 0 0
37891 - 0 0 0 0 0 0 0 0 0 0 0 0
37892 - 0 0 0 0 0 0 0 0 0 0 0 0
37893 - 0 0 0 0 0 0 0 0 0 10 10 10
37894 - 30 30 30 78 78 78 174 135 50 224 166 10
37895 -239 182 13 246 190 14 246 190 14 246 190 14
37896 -246 190 14 246 190 14 246 190 14 246 190 14
37897 -246 190 14 246 190 14 246 190 14 246 190 14
37898 -246 190 14 246 190 14 241 196 14 139 102 15
37899 - 2 2 6 2 2 6 2 2 6 2 2 6
37900 - 78 78 78 250 250 250 253 253 253 253 253 253
37901 -253 253 253 253 253 253 253 253 253 253 253 253
37902 -253 253 253 253 253 253 253 253 253 253 253 253
37903 -253 253 253 253 253 253 253 253 253 253 253 253
37904 -250 250 250 214 214 214 198 198 198 190 150 46
37905 -219 162 10 236 178 12 234 174 13 224 166 10
37906 -216 158 10 213 154 11 213 154 11 216 158 10
37907 -226 170 11 239 182 13 246 190 14 246 190 14
37908 -246 190 14 246 190 14 242 186 14 206 162 42
37909 -101 101 101 58 58 58 30 30 30 14 14 14
37910 - 6 6 6 0 0 0 0 0 0 0 0 0
37911 - 0 0 0 0 0 0 0 0 0 0 0 0
37912 - 0 0 0 0 0 0 0 0 0 0 0 0
37913 - 0 0 0 0 0 0 0 0 0 10 10 10
37914 - 30 30 30 74 74 74 174 135 50 216 158 10
37915 -236 178 12 246 190 14 246 190 14 246 190 14
37916 -246 190 14 246 190 14 246 190 14 246 190 14
37917 -246 190 14 246 190 14 246 190 14 246 190 14
37918 -246 190 14 246 190 14 241 196 14 226 184 13
37919 - 61 42 6 2 2 6 2 2 6 2 2 6
37920 - 22 22 22 238 238 238 253 253 253 253 253 253
37921 -253 253 253 253 253 253 253 253 253 253 253 253
37922 -253 253 253 253 253 253 253 253 253 253 253 253
37923 -253 253 253 253 253 253 253 253 253 253 253 253
37924 -253 253 253 226 226 226 187 187 187 180 133 36
37925 -216 158 10 236 178 12 239 182 13 236 178 12
37926 -230 174 11 226 170 11 226 170 11 230 174 11
37927 -236 178 12 242 186 14 246 190 14 246 190 14
37928 -246 190 14 246 190 14 246 186 14 239 182 13
37929 -206 162 42 106 106 106 66 66 66 34 34 34
37930 - 14 14 14 6 6 6 0 0 0 0 0 0
37931 - 0 0 0 0 0 0 0 0 0 0 0 0
37932 - 0 0 0 0 0 0 0 0 0 0 0 0
37933 - 0 0 0 0 0 0 0 0 0 6 6 6
37934 - 26 26 26 70 70 70 163 133 67 213 154 11
37935 -236 178 12 246 190 14 246 190 14 246 190 14
37936 -246 190 14 246 190 14 246 190 14 246 190 14
37937 -246 190 14 246 190 14 246 190 14 246 190 14
37938 -246 190 14 246 190 14 246 190 14 241 196 14
37939 -190 146 13 18 14 6 2 2 6 2 2 6
37940 - 46 46 46 246 246 246 253 253 253 253 253 253
37941 -253 253 253 253 253 253 253 253 253 253 253 253
37942 -253 253 253 253 253 253 253 253 253 253 253 253
37943 -253 253 253 253 253 253 253 253 253 253 253 253
37944 -253 253 253 221 221 221 86 86 86 156 107 11
37945 -216 158 10 236 178 12 242 186 14 246 186 14
37946 -242 186 14 239 182 13 239 182 13 242 186 14
37947 -242 186 14 246 186 14 246 190 14 246 190 14
37948 -246 190 14 246 190 14 246 190 14 246 190 14
37949 -242 186 14 225 175 15 142 122 72 66 66 66
37950 - 30 30 30 10 10 10 0 0 0 0 0 0
37951 - 0 0 0 0 0 0 0 0 0 0 0 0
37952 - 0 0 0 0 0 0 0 0 0 0 0 0
37953 - 0 0 0 0 0 0 0 0 0 6 6 6
37954 - 26 26 26 70 70 70 163 133 67 210 150 10
37955 -236 178 12 246 190 14 246 190 14 246 190 14
37956 -246 190 14 246 190 14 246 190 14 246 190 14
37957 -246 190 14 246 190 14 246 190 14 246 190 14
37958 -246 190 14 246 190 14 246 190 14 246 190 14
37959 -232 195 16 121 92 8 34 34 34 106 106 106
37960 -221 221 221 253 253 253 253 253 253 253 253 253
37961 -253 253 253 253 253 253 253 253 253 253 253 253
37962 -253 253 253 253 253 253 253 253 253 253 253 253
37963 -253 253 253 253 253 253 253 253 253 253 253 253
37964 -242 242 242 82 82 82 18 14 6 163 110 8
37965 -216 158 10 236 178 12 242 186 14 246 190 14
37966 -246 190 14 246 190 14 246 190 14 246 190 14
37967 -246 190 14 246 190 14 246 190 14 246 190 14
37968 -246 190 14 246 190 14 246 190 14 246 190 14
37969 -246 190 14 246 190 14 242 186 14 163 133 67
37970 - 46 46 46 18 18 18 6 6 6 0 0 0
37971 - 0 0 0 0 0 0 0 0 0 0 0 0
37972 - 0 0 0 0 0 0 0 0 0 0 0 0
37973 - 0 0 0 0 0 0 0 0 0 10 10 10
37974 - 30 30 30 78 78 78 163 133 67 210 150 10
37975 -236 178 12 246 186 14 246 190 14 246 190 14
37976 -246 190 14 246 190 14 246 190 14 246 190 14
37977 -246 190 14 246 190 14 246 190 14 246 190 14
37978 -246 190 14 246 190 14 246 190 14 246 190 14
37979 -241 196 14 215 174 15 190 178 144 253 253 253
37980 -253 253 253 253 253 253 253 253 253 253 253 253
37981 -253 253 253 253 253 253 253 253 253 253 253 253
37982 -253 253 253 253 253 253 253 253 253 253 253 253
37983 -253 253 253 253 253 253 253 253 253 218 218 218
37984 - 58 58 58 2 2 6 22 18 6 167 114 7
37985 -216 158 10 236 178 12 246 186 14 246 190 14
37986 -246 190 14 246 190 14 246 190 14 246 190 14
37987 -246 190 14 246 190 14 246 190 14 246 190 14
37988 -246 190 14 246 190 14 246 190 14 246 190 14
37989 -246 190 14 246 186 14 242 186 14 190 150 46
37990 - 54 54 54 22 22 22 6 6 6 0 0 0
37991 - 0 0 0 0 0 0 0 0 0 0 0 0
37992 - 0 0 0 0 0 0 0 0 0 0 0 0
37993 - 0 0 0 0 0 0 0 0 0 14 14 14
37994 - 38 38 38 86 86 86 180 133 36 213 154 11
37995 -236 178 12 246 186 14 246 190 14 246 190 14
37996 -246 190 14 246 190 14 246 190 14 246 190 14
37997 -246 190 14 246 190 14 246 190 14 246 190 14
37998 -246 190 14 246 190 14 246 190 14 246 190 14
37999 -246 190 14 232 195 16 190 146 13 214 214 214
38000 -253 253 253 253 253 253 253 253 253 253 253 253
38001 -253 253 253 253 253 253 253 253 253 253 253 253
38002 -253 253 253 253 253 253 253 253 253 253 253 253
38003 -253 253 253 250 250 250 170 170 170 26 26 26
38004 - 2 2 6 2 2 6 37 26 9 163 110 8
38005 -219 162 10 239 182 13 246 186 14 246 190 14
38006 -246 190 14 246 190 14 246 190 14 246 190 14
38007 -246 190 14 246 190 14 246 190 14 246 190 14
38008 -246 190 14 246 190 14 246 190 14 246 190 14
38009 -246 186 14 236 178 12 224 166 10 142 122 72
38010 - 46 46 46 18 18 18 6 6 6 0 0 0
38011 - 0 0 0 0 0 0 0 0 0 0 0 0
38012 - 0 0 0 0 0 0 0 0 0 0 0 0
38013 - 0 0 0 0 0 0 6 6 6 18 18 18
38014 - 50 50 50 109 106 95 192 133 9 224 166 10
38015 -242 186 14 246 190 14 246 190 14 246 190 14
38016 -246 190 14 246 190 14 246 190 14 246 190 14
38017 -246 190 14 246 190 14 246 190 14 246 190 14
38018 -246 190 14 246 190 14 246 190 14 246 190 14
38019 -242 186 14 226 184 13 210 162 10 142 110 46
38020 -226 226 226 253 253 253 253 253 253 253 253 253
38021 -253 253 253 253 253 253 253 253 253 253 253 253
38022 -253 253 253 253 253 253 253 253 253 253 253 253
38023 -198 198 198 66 66 66 2 2 6 2 2 6
38024 - 2 2 6 2 2 6 50 34 6 156 107 11
38025 -219 162 10 239 182 13 246 186 14 246 190 14
38026 -246 190 14 246 190 14 246 190 14 246 190 14
38027 -246 190 14 246 190 14 246 190 14 246 190 14
38028 -246 190 14 246 190 14 246 190 14 242 186 14
38029 -234 174 13 213 154 11 154 122 46 66 66 66
38030 - 30 30 30 10 10 10 0 0 0 0 0 0
38031 - 0 0 0 0 0 0 0 0 0 0 0 0
38032 - 0 0 0 0 0 0 0 0 0 0 0 0
38033 - 0 0 0 0 0 0 6 6 6 22 22 22
38034 - 58 58 58 154 121 60 206 145 10 234 174 13
38035 -242 186 14 246 186 14 246 190 14 246 190 14
38036 -246 190 14 246 190 14 246 190 14 246 190 14
38037 -246 190 14 246 190 14 246 190 14 246 190 14
38038 -246 190 14 246 190 14 246 190 14 246 190 14
38039 -246 186 14 236 178 12 210 162 10 163 110 8
38040 - 61 42 6 138 138 138 218 218 218 250 250 250
38041 -253 253 253 253 253 253 253 253 253 250 250 250
38042 -242 242 242 210 210 210 144 144 144 66 66 66
38043 - 6 6 6 2 2 6 2 2 6 2 2 6
38044 - 2 2 6 2 2 6 61 42 6 163 110 8
38045 -216 158 10 236 178 12 246 190 14 246 190 14
38046 -246 190 14 246 190 14 246 190 14 246 190 14
38047 -246 190 14 246 190 14 246 190 14 246 190 14
38048 -246 190 14 239 182 13 230 174 11 216 158 10
38049 -190 142 34 124 112 88 70 70 70 38 38 38
38050 - 18 18 18 6 6 6 0 0 0 0 0 0
38051 - 0 0 0 0 0 0 0 0 0 0 0 0
38052 - 0 0 0 0 0 0 0 0 0 0 0 0
38053 - 0 0 0 0 0 0 6 6 6 22 22 22
38054 - 62 62 62 168 124 44 206 145 10 224 166 10
38055 -236 178 12 239 182 13 242 186 14 242 186 14
38056 -246 186 14 246 190 14 246 190 14 246 190 14
38057 -246 190 14 246 190 14 246 190 14 246 190 14
38058 -246 190 14 246 190 14 246 190 14 246 190 14
38059 -246 190 14 236 178 12 216 158 10 175 118 6
38060 - 80 54 7 2 2 6 6 6 6 30 30 30
38061 - 54 54 54 62 62 62 50 50 50 38 38 38
38062 - 14 14 14 2 2 6 2 2 6 2 2 6
38063 - 2 2 6 2 2 6 2 2 6 2 2 6
38064 - 2 2 6 6 6 6 80 54 7 167 114 7
38065 -213 154 11 236 178 12 246 190 14 246 190 14
38066 -246 190 14 246 190 14 246 190 14 246 190 14
38067 -246 190 14 242 186 14 239 182 13 239 182 13
38068 -230 174 11 210 150 10 174 135 50 124 112 88
38069 - 82 82 82 54 54 54 34 34 34 18 18 18
38070 - 6 6 6 0 0 0 0 0 0 0 0 0
38071 - 0 0 0 0 0 0 0 0 0 0 0 0
38072 - 0 0 0 0 0 0 0 0 0 0 0 0
38073 - 0 0 0 0 0 0 6 6 6 18 18 18
38074 - 50 50 50 158 118 36 192 133 9 200 144 11
38075 -216 158 10 219 162 10 224 166 10 226 170 11
38076 -230 174 11 236 178 12 239 182 13 239 182 13
38077 -242 186 14 246 186 14 246 190 14 246 190 14
38078 -246 190 14 246 190 14 246 190 14 246 190 14
38079 -246 186 14 230 174 11 210 150 10 163 110 8
38080 -104 69 6 10 10 10 2 2 6 2 2 6
38081 - 2 2 6 2 2 6 2 2 6 2 2 6
38082 - 2 2 6 2 2 6 2 2 6 2 2 6
38083 - 2 2 6 2 2 6 2 2 6 2 2 6
38084 - 2 2 6 6 6 6 91 60 6 167 114 7
38085 -206 145 10 230 174 11 242 186 14 246 190 14
38086 -246 190 14 246 190 14 246 186 14 242 186 14
38087 -239 182 13 230 174 11 224 166 10 213 154 11
38088 -180 133 36 124 112 88 86 86 86 58 58 58
38089 - 38 38 38 22 22 22 10 10 10 6 6 6
38090 - 0 0 0 0 0 0 0 0 0 0 0 0
38091 - 0 0 0 0 0 0 0 0 0 0 0 0
38092 - 0 0 0 0 0 0 0 0 0 0 0 0
38093 - 0 0 0 0 0 0 0 0 0 14 14 14
38094 - 34 34 34 70 70 70 138 110 50 158 118 36
38095 -167 114 7 180 123 7 192 133 9 197 138 11
38096 -200 144 11 206 145 10 213 154 11 219 162 10
38097 -224 166 10 230 174 11 239 182 13 242 186 14
38098 -246 186 14 246 186 14 246 186 14 246 186 14
38099 -239 182 13 216 158 10 185 133 11 152 99 6
38100 -104 69 6 18 14 6 2 2 6 2 2 6
38101 - 2 2 6 2 2 6 2 2 6 2 2 6
38102 - 2 2 6 2 2 6 2 2 6 2 2 6
38103 - 2 2 6 2 2 6 2 2 6 2 2 6
38104 - 2 2 6 6 6 6 80 54 7 152 99 6
38105 -192 133 9 219 162 10 236 178 12 239 182 13
38106 -246 186 14 242 186 14 239 182 13 236 178 12
38107 -224 166 10 206 145 10 192 133 9 154 121 60
38108 - 94 94 94 62 62 62 42 42 42 22 22 22
38109 - 14 14 14 6 6 6 0 0 0 0 0 0
38110 - 0 0 0 0 0 0 0 0 0 0 0 0
38111 - 0 0 0 0 0 0 0 0 0 0 0 0
38112 - 0 0 0 0 0 0 0 0 0 0 0 0
38113 - 0 0 0 0 0 0 0 0 0 6 6 6
38114 - 18 18 18 34 34 34 58 58 58 78 78 78
38115 -101 98 89 124 112 88 142 110 46 156 107 11
38116 -163 110 8 167 114 7 175 118 6 180 123 7
38117 -185 133 11 197 138 11 210 150 10 219 162 10
38118 -226 170 11 236 178 12 236 178 12 234 174 13
38119 -219 162 10 197 138 11 163 110 8 130 83 6
38120 - 91 60 6 10 10 10 2 2 6 2 2 6
38121 - 18 18 18 38 38 38 38 38 38 38 38 38
38122 - 38 38 38 38 38 38 38 38 38 38 38 38
38123 - 38 38 38 38 38 38 26 26 26 2 2 6
38124 - 2 2 6 6 6 6 70 47 6 137 92 6
38125 -175 118 6 200 144 11 219 162 10 230 174 11
38126 -234 174 13 230 174 11 219 162 10 210 150 10
38127 -192 133 9 163 110 8 124 112 88 82 82 82
38128 - 50 50 50 30 30 30 14 14 14 6 6 6
38129 - 0 0 0 0 0 0 0 0 0 0 0 0
38130 - 0 0 0 0 0 0 0 0 0 0 0 0
38131 - 0 0 0 0 0 0 0 0 0 0 0 0
38132 - 0 0 0 0 0 0 0 0 0 0 0 0
38133 - 0 0 0 0 0 0 0 0 0 0 0 0
38134 - 6 6 6 14 14 14 22 22 22 34 34 34
38135 - 42 42 42 58 58 58 74 74 74 86 86 86
38136 -101 98 89 122 102 70 130 98 46 121 87 25
38137 -137 92 6 152 99 6 163 110 8 180 123 7
38138 -185 133 11 197 138 11 206 145 10 200 144 11
38139 -180 123 7 156 107 11 130 83 6 104 69 6
38140 - 50 34 6 54 54 54 110 110 110 101 98 89
38141 - 86 86 86 82 82 82 78 78 78 78 78 78
38142 - 78 78 78 78 78 78 78 78 78 78 78 78
38143 - 78 78 78 82 82 82 86 86 86 94 94 94
38144 -106 106 106 101 101 101 86 66 34 124 80 6
38145 -156 107 11 180 123 7 192 133 9 200 144 11
38146 -206 145 10 200 144 11 192 133 9 175 118 6
38147 -139 102 15 109 106 95 70 70 70 42 42 42
38148 - 22 22 22 10 10 10 0 0 0 0 0 0
38149 - 0 0 0 0 0 0 0 0 0 0 0 0
38150 - 0 0 0 0 0 0 0 0 0 0 0 0
38151 - 0 0 0 0 0 0 0 0 0 0 0 0
38152 - 0 0 0 0 0 0 0 0 0 0 0 0
38153 - 0 0 0 0 0 0 0 0 0 0 0 0
38154 - 0 0 0 0 0 0 6 6 6 10 10 10
38155 - 14 14 14 22 22 22 30 30 30 38 38 38
38156 - 50 50 50 62 62 62 74 74 74 90 90 90
38157 -101 98 89 112 100 78 121 87 25 124 80 6
38158 -137 92 6 152 99 6 152 99 6 152 99 6
38159 -138 86 6 124 80 6 98 70 6 86 66 30
38160 -101 98 89 82 82 82 58 58 58 46 46 46
38161 - 38 38 38 34 34 34 34 34 34 34 34 34
38162 - 34 34 34 34 34 34 34 34 34 34 34 34
38163 - 34 34 34 34 34 34 38 38 38 42 42 42
38164 - 54 54 54 82 82 82 94 86 76 91 60 6
38165 -134 86 6 156 107 11 167 114 7 175 118 6
38166 -175 118 6 167 114 7 152 99 6 121 87 25
38167 -101 98 89 62 62 62 34 34 34 18 18 18
38168 - 6 6 6 0 0 0 0 0 0 0 0 0
38169 - 0 0 0 0 0 0 0 0 0 0 0 0
38170 - 0 0 0 0 0 0 0 0 0 0 0 0
38171 - 0 0 0 0 0 0 0 0 0 0 0 0
38172 - 0 0 0 0 0 0 0 0 0 0 0 0
38173 - 0 0 0 0 0 0 0 0 0 0 0 0
38174 - 0 0 0 0 0 0 0 0 0 0 0 0
38175 - 0 0 0 6 6 6 6 6 6 10 10 10
38176 - 18 18 18 22 22 22 30 30 30 42 42 42
38177 - 50 50 50 66 66 66 86 86 86 101 98 89
38178 -106 86 58 98 70 6 104 69 6 104 69 6
38179 -104 69 6 91 60 6 82 62 34 90 90 90
38180 - 62 62 62 38 38 38 22 22 22 14 14 14
38181 - 10 10 10 10 10 10 10 10 10 10 10 10
38182 - 10 10 10 10 10 10 6 6 6 10 10 10
38183 - 10 10 10 10 10 10 10 10 10 14 14 14
38184 - 22 22 22 42 42 42 70 70 70 89 81 66
38185 - 80 54 7 104 69 6 124 80 6 137 92 6
38186 -134 86 6 116 81 8 100 82 52 86 86 86
38187 - 58 58 58 30 30 30 14 14 14 6 6 6
38188 - 0 0 0 0 0 0 0 0 0 0 0 0
38189 - 0 0 0 0 0 0 0 0 0 0 0 0
38190 - 0 0 0 0 0 0 0 0 0 0 0 0
38191 - 0 0 0 0 0 0 0 0 0 0 0 0
38192 - 0 0 0 0 0 0 0 0 0 0 0 0
38193 - 0 0 0 0 0 0 0 0 0 0 0 0
38194 - 0 0 0 0 0 0 0 0 0 0 0 0
38195 - 0 0 0 0 0 0 0 0 0 0 0 0
38196 - 0 0 0 6 6 6 10 10 10 14 14 14
38197 - 18 18 18 26 26 26 38 38 38 54 54 54
38198 - 70 70 70 86 86 86 94 86 76 89 81 66
38199 - 89 81 66 86 86 86 74 74 74 50 50 50
38200 - 30 30 30 14 14 14 6 6 6 0 0 0
38201 - 0 0 0 0 0 0 0 0 0 0 0 0
38202 - 0 0 0 0 0 0 0 0 0 0 0 0
38203 - 0 0 0 0 0 0 0 0 0 0 0 0
38204 - 6 6 6 18 18 18 34 34 34 58 58 58
38205 - 82 82 82 89 81 66 89 81 66 89 81 66
38206 - 94 86 66 94 86 76 74 74 74 50 50 50
38207 - 26 26 26 14 14 14 6 6 6 0 0 0
38208 - 0 0 0 0 0 0 0 0 0 0 0 0
38209 - 0 0 0 0 0 0 0 0 0 0 0 0
38210 - 0 0 0 0 0 0 0 0 0 0 0 0
38211 - 0 0 0 0 0 0 0 0 0 0 0 0
38212 - 0 0 0 0 0 0 0 0 0 0 0 0
38213 - 0 0 0 0 0 0 0 0 0 0 0 0
38214 - 0 0 0 0 0 0 0 0 0 0 0 0
38215 - 0 0 0 0 0 0 0 0 0 0 0 0
38216 - 0 0 0 0 0 0 0 0 0 0 0 0
38217 - 6 6 6 6 6 6 14 14 14 18 18 18
38218 - 30 30 30 38 38 38 46 46 46 54 54 54
38219 - 50 50 50 42 42 42 30 30 30 18 18 18
38220 - 10 10 10 0 0 0 0 0 0 0 0 0
38221 - 0 0 0 0 0 0 0 0 0 0 0 0
38222 - 0 0 0 0 0 0 0 0 0 0 0 0
38223 - 0 0 0 0 0 0 0 0 0 0 0 0
38224 - 0 0 0 6 6 6 14 14 14 26 26 26
38225 - 38 38 38 50 50 50 58 58 58 58 58 58
38226 - 54 54 54 42 42 42 30 30 30 18 18 18
38227 - 10 10 10 0 0 0 0 0 0 0 0 0
38228 - 0 0 0 0 0 0 0 0 0 0 0 0
38229 - 0 0 0 0 0 0 0 0 0 0 0 0
38230 - 0 0 0 0 0 0 0 0 0 0 0 0
38231 - 0 0 0 0 0 0 0 0 0 0 0 0
38232 - 0 0 0 0 0 0 0 0 0 0 0 0
38233 - 0 0 0 0 0 0 0 0 0 0 0 0
38234 - 0 0 0 0 0 0 0 0 0 0 0 0
38235 - 0 0 0 0 0 0 0 0 0 0 0 0
38236 - 0 0 0 0 0 0 0 0 0 0 0 0
38237 - 0 0 0 0 0 0 0 0 0 6 6 6
38238 - 6 6 6 10 10 10 14 14 14 18 18 18
38239 - 18 18 18 14 14 14 10 10 10 6 6 6
38240 - 0 0 0 0 0 0 0 0 0 0 0 0
38241 - 0 0 0 0 0 0 0 0 0 0 0 0
38242 - 0 0 0 0 0 0 0 0 0 0 0 0
38243 - 0 0 0 0 0 0 0 0 0 0 0 0
38244 - 0 0 0 0 0 0 0 0 0 6 6 6
38245 - 14 14 14 18 18 18 22 22 22 22 22 22
38246 - 18 18 18 14 14 14 10 10 10 6 6 6
38247 - 0 0 0 0 0 0 0 0 0 0 0 0
38248 - 0 0 0 0 0 0 0 0 0 0 0 0
38249 - 0 0 0 0 0 0 0 0 0 0 0 0
38250 - 0 0 0 0 0 0 0 0 0 0 0 0
38251 - 0 0 0 0 0 0 0 0 0 0 0 0
38252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38265 +4 4 4 4 4 4
38266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38279 +4 4 4 4 4 4
38280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38293 +4 4 4 4 4 4
38294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38307 +4 4 4 4 4 4
38308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38321 +4 4 4 4 4 4
38322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38335 +4 4 4 4 4 4
38336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38340 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38341 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38345 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38346 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38347 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38349 +4 4 4 4 4 4
38350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38354 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38355 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38356 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38359 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38360 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38361 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38362 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38363 +4 4 4 4 4 4
38364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38368 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38369 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38370 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38373 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38374 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38375 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38376 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38377 +4 4 4 4 4 4
38378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38381 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38382 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38383 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38384 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38385 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38386 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38387 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38388 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38389 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38390 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38391 +4 4 4 4 4 4
38392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38395 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38396 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38397 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38398 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38399 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38400 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38401 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38402 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38403 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38404 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38405 +4 4 4 4 4 4
38406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38409 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38410 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38411 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38412 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38413 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38414 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38415 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38416 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38417 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38418 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38419 +4 4 4 4 4 4
38420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38422 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38423 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38424 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38425 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38426 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38427 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38428 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38429 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38430 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38431 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38432 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38433 +4 4 4 4 4 4
38434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38436 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38437 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38438 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38439 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38440 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38441 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38442 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38443 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38444 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38445 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38446 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38447 +4 4 4 4 4 4
38448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38450 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38451 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38452 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38453 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38454 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38455 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38456 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38457 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38458 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38459 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38460 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38461 +4 4 4 4 4 4
38462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38464 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38465 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38466 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38467 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38468 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38469 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38470 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38471 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38472 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38473 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38474 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38475 +4 4 4 4 4 4
38476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38477 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38478 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38479 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38480 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38481 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38482 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38483 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38484 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38485 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38486 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38487 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38488 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38489 +4 4 4 4 4 4
38490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38491 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38492 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38493 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38494 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38495 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38496 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38497 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38498 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38499 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38500 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38501 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38502 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38503 +0 0 0 4 4 4
38504 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38505 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38506 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38507 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38508 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38509 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38510 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38511 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38512 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38513 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38514 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38515 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38516 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38517 +2 0 0 0 0 0
38518 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38519 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38520 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38521 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38522 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38523 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38524 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38525 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38526 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38527 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38528 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38529 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38530 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38531 +37 38 37 0 0 0
38532 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38533 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38534 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38535 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38536 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38537 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38538 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38539 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38540 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38541 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38542 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38543 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38544 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38545 +85 115 134 4 0 0
38546 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38547 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38548 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38549 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38550 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38551 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38552 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38553 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38554 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38555 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38556 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38557 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38558 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38559 +60 73 81 4 0 0
38560 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38561 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38562 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38563 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38564 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38565 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38566 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38567 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38568 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38569 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38570 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38571 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38572 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38573 +16 19 21 4 0 0
38574 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38575 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38576 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38577 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38578 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38579 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38580 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38581 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38582 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38583 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38584 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38585 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38586 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38587 +4 0 0 4 3 3
38588 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38589 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38590 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38592 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38593 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38594 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38595 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38596 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38597 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38598 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38599 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38600 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38601 +3 2 2 4 4 4
38602 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38603 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38604 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38605 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38606 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38607 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38608 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38609 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38610 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38611 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38612 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38613 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38614 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38615 +4 4 4 4 4 4
38616 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38617 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38618 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38619 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38620 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38621 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38622 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38623 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38624 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38625 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38626 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38627 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38628 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38629 +4 4 4 4 4 4
38630 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38631 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38632 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38633 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38634 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38635 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38636 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38637 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38638 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38639 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38640 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38641 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38642 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38643 +5 5 5 5 5 5
38644 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38645 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38646 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38647 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38648 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38649 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38650 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38651 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38652 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38653 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38654 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38655 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38656 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38657 +5 5 5 4 4 4
38658 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38659 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38660 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38661 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38662 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38663 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38664 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38665 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38666 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38667 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38668 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38669 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38671 +4 4 4 4 4 4
38672 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38673 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38674 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38675 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38676 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38677 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38678 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38679 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38680 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38681 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38682 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38683 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38685 +4 4 4 4 4 4
38686 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38687 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38688 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38689 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38690 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38691 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38692 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38693 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38694 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38695 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38696 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38699 +4 4 4 4 4 4
38700 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38701 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38702 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38703 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38704 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38705 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38706 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38707 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38708 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38709 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38710 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38713 +4 4 4 4 4 4
38714 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38715 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38716 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38717 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38718 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38719 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38720 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38721 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38722 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38723 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38724 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38727 +4 4 4 4 4 4
38728 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38729 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38730 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38731 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38732 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38733 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38734 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38735 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38736 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38737 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38738 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38741 +4 4 4 4 4 4
38742 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38743 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38744 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38745 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38746 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38747 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38748 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38749 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38750 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38751 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38752 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38755 +4 4 4 4 4 4
38756 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38757 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38758 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38759 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38760 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38761 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38762 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38763 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38764 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38765 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38766 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38769 +4 4 4 4 4 4
38770 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38771 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38772 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38773 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38774 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38775 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38776 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38777 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38778 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38779 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38780 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38783 +4 4 4 4 4 4
38784 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38785 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38786 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38787 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38788 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38789 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38790 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38791 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38792 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38793 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38794 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38797 +4 4 4 4 4 4
38798 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38799 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38800 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38801 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38802 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38803 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38804 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38805 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38806 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38807 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38808 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38811 +4 4 4 4 4 4
38812 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38813 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38814 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38815 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38816 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38817 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38818 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38819 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38820 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38821 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38822 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38825 +4 4 4 4 4 4
38826 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38827 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38828 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38829 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38830 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38831 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38832 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38833 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38834 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38835 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38836 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38839 +4 4 4 4 4 4
38840 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38841 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38842 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38843 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38844 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38845 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38846 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38847 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38848 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38849 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38850 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38853 +4 4 4 4 4 4
38854 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38855 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38856 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38857 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38858 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38859 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38860 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38861 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38862 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38863 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38864 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38867 +4 4 4 4 4 4
38868 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38869 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38870 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38871 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38872 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38873 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38874 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38875 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38876 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38877 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38878 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38881 +4 4 4 4 4 4
38882 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38883 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38884 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38885 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38886 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38887 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38888 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38889 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38890 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38891 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38892 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38895 +4 4 4 4 4 4
38896 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38897 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38898 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38899 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38900 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38901 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38902 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38903 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38904 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38905 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38906 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38909 +4 4 4 4 4 4
38910 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38911 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38912 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38913 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38914 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38915 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38916 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38917 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38918 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38919 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38920 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38923 +4 4 4 4 4 4
38924 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38925 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38926 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38927 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38928 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38929 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38930 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38931 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38932 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38933 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38934 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38937 +4 4 4 4 4 4
38938 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38939 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38940 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38941 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38942 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38943 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38944 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38945 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38946 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38947 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38948 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38951 +4 4 4 4 4 4
38952 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38953 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38954 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38955 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38956 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38957 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38958 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38959 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38960 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38961 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38962 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38965 +4 4 4 4 4 4
38966 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38967 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38968 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38969 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38970 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38971 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38972 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38973 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38974 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38975 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38976 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38979 +4 4 4 4 4 4
38980 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38981 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38982 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38983 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38984 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38985 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38986 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38987 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38988 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38989 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38990 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38993 +4 4 4 4 4 4
38994 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38995 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38996 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38997 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38998 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38999 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
39000 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
39001 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
39002 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
39003 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
39004 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39007 +4 4 4 4 4 4
39008 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
39009 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
39010 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
39011 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
39012 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
39013 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
39014 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
39015 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
39016 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
39017 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
39018 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39021 +4 4 4 4 4 4
39022 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
39023 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39024 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
39025 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
39026 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
39027 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
39028 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
39029 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
39030 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
39031 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
39032 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39035 +4 4 4 4 4 4
39036 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
39037 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39038 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
39039 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
39040 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
39041 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
39042 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
39044 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
39045 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
39046 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39049 +4 4 4 4 4 4
39050 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
39051 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
39052 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
39053 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
39054 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
39055 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
39056 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
39057 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
39058 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
39059 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
39060 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063 +4 4 4 4 4 4
39064 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
39065 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
39066 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39067 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
39068 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
39069 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
39070 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
39071 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
39072 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
39073 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
39074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077 +4 4 4 4 4 4
39078 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39079 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
39080 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
39081 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
39082 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
39083 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
39084 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
39085 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
39086 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
39087 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091 +4 4 4 4 4 4
39092 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
39093 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
39094 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
39095 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
39096 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
39097 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
39098 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
39099 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
39100 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
39101 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105 +4 4 4 4 4 4
39106 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39107 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39108 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39109 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39110 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39111 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39112 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39113 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39114 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39115 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119 +4 4 4 4 4 4
39120 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39121 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39122 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39123 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39124 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39125 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39126 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39127 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39128 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39133 +4 4 4 4 4 4
39134 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39135 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39136 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39137 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39138 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39139 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39140 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39141 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39142 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39147 +4 4 4 4 4 4
39148 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39149 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39150 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39151 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39152 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39153 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39154 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39155 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39156 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39161 +4 4 4 4 4 4
39162 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39163 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39164 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39165 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39166 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39167 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39168 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39169 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39175 +4 4 4 4 4 4
39176 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39177 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39178 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39179 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39180 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39181 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39182 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39183 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39189 +4 4 4 4 4 4
39190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39191 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39192 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39193 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39194 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39195 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39196 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39197 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39203 +4 4 4 4 4 4
39204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39205 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39206 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39207 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39208 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39209 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39210 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39211 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39217 +4 4 4 4 4 4
39218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39219 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39220 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39221 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39222 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39223 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39224 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39225 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39231 +4 4 4 4 4 4
39232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39234 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39235 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39236 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39237 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39238 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39239 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39245 +4 4 4 4 4 4
39246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39249 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39250 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39251 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39252 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39259 +4 4 4 4 4 4
39260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39263 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39264 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39265 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39266 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39273 +4 4 4 4 4 4
39274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39277 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39278 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39279 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39280 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39287 +4 4 4 4 4 4
39288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39291 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39292 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39293 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39294 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39301 +4 4 4 4 4 4
39302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39306 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39307 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39308 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39315 +4 4 4 4 4 4
39316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39320 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39321 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39322 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39329 +4 4 4 4 4 4
39330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39334 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39335 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39336 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39343 +4 4 4 4 4 4
39344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39348 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39349 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39357 +4 4 4 4 4 4
39358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39362 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39363 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39370 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39371 +4 4 4 4 4 4
39372 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39373 index 3473e75..c930142 100644
39374 --- a/drivers/video/udlfb.c
39375 +++ b/drivers/video/udlfb.c
39376 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39377 dlfb_urb_completion(urb);
39378
39379 error:
39380 - atomic_add(bytes_sent, &dev->bytes_sent);
39381 - atomic_add(bytes_identical, &dev->bytes_identical);
39382 - atomic_add(width*height*2, &dev->bytes_rendered);
39383 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39384 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39385 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39386 end_cycles = get_cycles();
39387 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39388 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39389 >> 10)), /* Kcycles */
39390 &dev->cpu_kcycles_used);
39391
39392 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39393 dlfb_urb_completion(urb);
39394
39395 error:
39396 - atomic_add(bytes_sent, &dev->bytes_sent);
39397 - atomic_add(bytes_identical, &dev->bytes_identical);
39398 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39399 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39400 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39401 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39402 end_cycles = get_cycles();
39403 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39404 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39405 >> 10)), /* Kcycles */
39406 &dev->cpu_kcycles_used);
39407 }
39408 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39409 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39410 struct dlfb_data *dev = fb_info->par;
39411 return snprintf(buf, PAGE_SIZE, "%u\n",
39412 - atomic_read(&dev->bytes_rendered));
39413 + atomic_read_unchecked(&dev->bytes_rendered));
39414 }
39415
39416 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39417 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39418 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39419 struct dlfb_data *dev = fb_info->par;
39420 return snprintf(buf, PAGE_SIZE, "%u\n",
39421 - atomic_read(&dev->bytes_identical));
39422 + atomic_read_unchecked(&dev->bytes_identical));
39423 }
39424
39425 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39426 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39427 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39428 struct dlfb_data *dev = fb_info->par;
39429 return snprintf(buf, PAGE_SIZE, "%u\n",
39430 - atomic_read(&dev->bytes_sent));
39431 + atomic_read_unchecked(&dev->bytes_sent));
39432 }
39433
39434 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39435 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39436 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39437 struct dlfb_data *dev = fb_info->par;
39438 return snprintf(buf, PAGE_SIZE, "%u\n",
39439 - atomic_read(&dev->cpu_kcycles_used));
39440 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39441 }
39442
39443 static ssize_t edid_show(
39444 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39445 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39446 struct dlfb_data *dev = fb_info->par;
39447
39448 - atomic_set(&dev->bytes_rendered, 0);
39449 - atomic_set(&dev->bytes_identical, 0);
39450 - atomic_set(&dev->bytes_sent, 0);
39451 - atomic_set(&dev->cpu_kcycles_used, 0);
39452 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39453 + atomic_set_unchecked(&dev->bytes_identical, 0);
39454 + atomic_set_unchecked(&dev->bytes_sent, 0);
39455 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39456
39457 return count;
39458 }
39459 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39460 index 7f8472c..9842e87 100644
39461 --- a/drivers/video/uvesafb.c
39462 +++ b/drivers/video/uvesafb.c
39463 @@ -19,6 +19,7 @@
39464 #include <linux/io.h>
39465 #include <linux/mutex.h>
39466 #include <linux/slab.h>
39467 +#include <linux/moduleloader.h>
39468 #include <video/edid.h>
39469 #include <video/uvesafb.h>
39470 #ifdef CONFIG_X86
39471 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39472 NULL,
39473 };
39474
39475 - return call_usermodehelper(v86d_path, argv, envp, 1);
39476 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39477 }
39478
39479 /*
39480 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39481 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39482 par->pmi_setpal = par->ypan = 0;
39483 } else {
39484 +
39485 +#ifdef CONFIG_PAX_KERNEXEC
39486 +#ifdef CONFIG_MODULES
39487 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39488 +#endif
39489 + if (!par->pmi_code) {
39490 + par->pmi_setpal = par->ypan = 0;
39491 + return 0;
39492 + }
39493 +#endif
39494 +
39495 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39496 + task->t.regs.edi);
39497 +
39498 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39499 + pax_open_kernel();
39500 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39501 + pax_close_kernel();
39502 +
39503 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39504 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39505 +#else
39506 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39507 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39508 +#endif
39509 +
39510 printk(KERN_INFO "uvesafb: protected mode interface info at "
39511 "%04x:%04x\n",
39512 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39513 @@ -1821,6 +1844,11 @@ out:
39514 if (par->vbe_modes)
39515 kfree(par->vbe_modes);
39516
39517 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39518 + if (par->pmi_code)
39519 + module_free_exec(NULL, par->pmi_code);
39520 +#endif
39521 +
39522 framebuffer_release(info);
39523 return err;
39524 }
39525 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39526 kfree(par->vbe_state_orig);
39527 if (par->vbe_state_saved)
39528 kfree(par->vbe_state_saved);
39529 +
39530 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39531 + if (par->pmi_code)
39532 + module_free_exec(NULL, par->pmi_code);
39533 +#endif
39534 +
39535 }
39536
39537 framebuffer_release(info);
39538 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39539 index 501b340..86bd4cf 100644
39540 --- a/drivers/video/vesafb.c
39541 +++ b/drivers/video/vesafb.c
39542 @@ -9,6 +9,7 @@
39543 */
39544
39545 #include <linux/module.h>
39546 +#include <linux/moduleloader.h>
39547 #include <linux/kernel.h>
39548 #include <linux/errno.h>
39549 #include <linux/string.h>
39550 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39551 static int vram_total __initdata; /* Set total amount of memory */
39552 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39553 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39554 -static void (*pmi_start)(void) __read_mostly;
39555 -static void (*pmi_pal) (void) __read_mostly;
39556 +static void (*pmi_start)(void) __read_only;
39557 +static void (*pmi_pal) (void) __read_only;
39558 static int depth __read_mostly;
39559 static int vga_compat __read_mostly;
39560 /* --------------------------------------------------------------------- */
39561 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39562 unsigned int size_vmode;
39563 unsigned int size_remap;
39564 unsigned int size_total;
39565 + void *pmi_code = NULL;
39566
39567 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39568 return -ENODEV;
39569 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39570 size_remap = size_total;
39571 vesafb_fix.smem_len = size_remap;
39572
39573 -#ifndef __i386__
39574 - screen_info.vesapm_seg = 0;
39575 -#endif
39576 -
39577 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39578 printk(KERN_WARNING
39579 "vesafb: cannot reserve video memory at 0x%lx\n",
39580 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39581 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39582 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39583
39584 +#ifdef __i386__
39585 +
39586 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39587 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39588 + if (!pmi_code)
39589 +#elif !defined(CONFIG_PAX_KERNEXEC)
39590 + if (0)
39591 +#endif
39592 +
39593 +#endif
39594 + screen_info.vesapm_seg = 0;
39595 +
39596 if (screen_info.vesapm_seg) {
39597 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39598 - screen_info.vesapm_seg,screen_info.vesapm_off);
39599 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39600 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39601 }
39602
39603 if (screen_info.vesapm_seg < 0xc000)
39604 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39605
39606 if (ypan || pmi_setpal) {
39607 unsigned short *pmi_base;
39608 +
39609 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39610 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39611 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39612 +
39613 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39614 + pax_open_kernel();
39615 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39616 +#else
39617 + pmi_code = pmi_base;
39618 +#endif
39619 +
39620 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39621 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39622 +
39623 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39624 + pmi_start = ktva_ktla(pmi_start);
39625 + pmi_pal = ktva_ktla(pmi_pal);
39626 + pax_close_kernel();
39627 +#endif
39628 +
39629 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39630 if (pmi_base[3]) {
39631 printk(KERN_INFO "vesafb: pmi: ports = ");
39632 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39633 info->node, info->fix.id);
39634 return 0;
39635 err:
39636 +
39637 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39638 + module_free_exec(NULL, pmi_code);
39639 +#endif
39640 +
39641 if (info->screen_base)
39642 iounmap(info->screen_base);
39643 framebuffer_release(info);
39644 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39645 index 88714ae..16c2e11 100644
39646 --- a/drivers/video/via/via_clock.h
39647 +++ b/drivers/video/via/via_clock.h
39648 @@ -56,7 +56,7 @@ struct via_clock {
39649
39650 void (*set_engine_pll_state)(u8 state);
39651 void (*set_engine_pll)(struct via_pll_config config);
39652 -};
39653 +} __no_const;
39654
39655
39656 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39657 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39658 index e56c934..fc22f4b 100644
39659 --- a/drivers/xen/xen-pciback/conf_space.h
39660 +++ b/drivers/xen/xen-pciback/conf_space.h
39661 @@ -44,15 +44,15 @@ struct config_field {
39662 struct {
39663 conf_dword_write write;
39664 conf_dword_read read;
39665 - } dw;
39666 + } __no_const dw;
39667 struct {
39668 conf_word_write write;
39669 conf_word_read read;
39670 - } w;
39671 + } __no_const w;
39672 struct {
39673 conf_byte_write write;
39674 conf_byte_read read;
39675 - } b;
39676 + } __no_const b;
39677 } u;
39678 struct list_head list;
39679 };
39680 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39681 index 879ed88..bc03a01 100644
39682 --- a/fs/9p/vfs_inode.c
39683 +++ b/fs/9p/vfs_inode.c
39684 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39685 void
39686 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39687 {
39688 - char *s = nd_get_link(nd);
39689 + const char *s = nd_get_link(nd);
39690
39691 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39692 IS_ERR(s) ? "<error>" : s);
39693 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39694 index 79e2ca7..5828ad1 100644
39695 --- a/fs/Kconfig.binfmt
39696 +++ b/fs/Kconfig.binfmt
39697 @@ -86,7 +86,7 @@ config HAVE_AOUT
39698
39699 config BINFMT_AOUT
39700 tristate "Kernel support for a.out and ECOFF binaries"
39701 - depends on HAVE_AOUT
39702 + depends on HAVE_AOUT && BROKEN
39703 ---help---
39704 A.out (Assembler.OUTput) is a set of formats for libraries and
39705 executables used in the earliest versions of UNIX. Linux used
39706 diff --git a/fs/aio.c b/fs/aio.c
39707 index 969beb0..09fab51 100644
39708 --- a/fs/aio.c
39709 +++ b/fs/aio.c
39710 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39711 size += sizeof(struct io_event) * nr_events;
39712 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39713
39714 - if (nr_pages < 0)
39715 + if (nr_pages <= 0)
39716 return -EINVAL;
39717
39718 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39719 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39720 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39721 {
39722 ssize_t ret;
39723 + struct iovec iovstack;
39724
39725 #ifdef CONFIG_COMPAT
39726 if (compat)
39727 ret = compat_rw_copy_check_uvector(type,
39728 (struct compat_iovec __user *)kiocb->ki_buf,
39729 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39730 + kiocb->ki_nbytes, 1, &iovstack,
39731 &kiocb->ki_iovec, 1);
39732 else
39733 #endif
39734 ret = rw_copy_check_uvector(type,
39735 (struct iovec __user *)kiocb->ki_buf,
39736 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39737 + kiocb->ki_nbytes, 1, &iovstack,
39738 &kiocb->ki_iovec, 1);
39739 if (ret < 0)
39740 goto out;
39741
39742 + if (kiocb->ki_iovec == &iovstack) {
39743 + kiocb->ki_inline_vec = iovstack;
39744 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39745 + }
39746 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39747 kiocb->ki_cur_seg = 0;
39748 /* ki_nbytes/left now reflect bytes instead of segs */
39749 diff --git a/fs/attr.c b/fs/attr.c
39750 index 7ee7ba4..0c61a60 100644
39751 --- a/fs/attr.c
39752 +++ b/fs/attr.c
39753 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39754 unsigned long limit;
39755
39756 limit = rlimit(RLIMIT_FSIZE);
39757 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39758 if (limit != RLIM_INFINITY && offset > limit)
39759 goto out_sig;
39760 if (offset > inode->i_sb->s_maxbytes)
39761 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39762 index e1fbdee..cd5ea56 100644
39763 --- a/fs/autofs4/waitq.c
39764 +++ b/fs/autofs4/waitq.c
39765 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39766 {
39767 unsigned long sigpipe, flags;
39768 mm_segment_t fs;
39769 - const char *data = (const char *)addr;
39770 + const char __user *data = (const char __force_user *)addr;
39771 ssize_t wr = 0;
39772
39773 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39774 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39775 index 8342ca6..82fd192 100644
39776 --- a/fs/befs/linuxvfs.c
39777 +++ b/fs/befs/linuxvfs.c
39778 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39779 {
39780 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39781 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39782 - char *link = nd_get_link(nd);
39783 + const char *link = nd_get_link(nd);
39784 if (!IS_ERR(link))
39785 kfree(link);
39786 }
39787 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39788 index a6395bd..a5b24c4 100644
39789 --- a/fs/binfmt_aout.c
39790 +++ b/fs/binfmt_aout.c
39791 @@ -16,6 +16,7 @@
39792 #include <linux/string.h>
39793 #include <linux/fs.h>
39794 #include <linux/file.h>
39795 +#include <linux/security.h>
39796 #include <linux/stat.h>
39797 #include <linux/fcntl.h>
39798 #include <linux/ptrace.h>
39799 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39800 #endif
39801 # define START_STACK(u) ((void __user *)u.start_stack)
39802
39803 + memset(&dump, 0, sizeof(dump));
39804 +
39805 fs = get_fs();
39806 set_fs(KERNEL_DS);
39807 has_dumped = 1;
39808 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39809
39810 /* If the size of the dump file exceeds the rlimit, then see what would happen
39811 if we wrote the stack, but not the data area. */
39812 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39813 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39814 dump.u_dsize = 0;
39815
39816 /* Make sure we have enough room to write the stack and data areas. */
39817 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39818 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39819 dump.u_ssize = 0;
39820
39821 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39822 rlim = rlimit(RLIMIT_DATA);
39823 if (rlim >= RLIM_INFINITY)
39824 rlim = ~0;
39825 +
39826 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39827 if (ex.a_data + ex.a_bss > rlim)
39828 return -ENOMEM;
39829
39830 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39831 install_exec_creds(bprm);
39832 current->flags &= ~PF_FORKNOEXEC;
39833
39834 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39835 + current->mm->pax_flags = 0UL;
39836 +#endif
39837 +
39838 +#ifdef CONFIG_PAX_PAGEEXEC
39839 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39840 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39841 +
39842 +#ifdef CONFIG_PAX_EMUTRAMP
39843 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39844 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39845 +#endif
39846 +
39847 +#ifdef CONFIG_PAX_MPROTECT
39848 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39849 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39850 +#endif
39851 +
39852 + }
39853 +#endif
39854 +
39855 if (N_MAGIC(ex) == OMAGIC) {
39856 unsigned long text_addr, map_size;
39857 loff_t pos;
39858 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39859
39860 down_write(&current->mm->mmap_sem);
39861 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39862 - PROT_READ | PROT_WRITE | PROT_EXEC,
39863 + PROT_READ | PROT_WRITE,
39864 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39865 fd_offset + ex.a_text);
39866 up_write(&current->mm->mmap_sem);
39867 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39868 index 21ac5ee..31d14e9 100644
39869 --- a/fs/binfmt_elf.c
39870 +++ b/fs/binfmt_elf.c
39871 @@ -32,6 +32,7 @@
39872 #include <linux/elf.h>
39873 #include <linux/utsname.h>
39874 #include <linux/coredump.h>
39875 +#include <linux/xattr.h>
39876 #include <asm/uaccess.h>
39877 #include <asm/param.h>
39878 #include <asm/page.h>
39879 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39880 #define elf_core_dump NULL
39881 #endif
39882
39883 +#ifdef CONFIG_PAX_MPROTECT
39884 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39885 +#endif
39886 +
39887 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39888 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39889 #else
39890 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39891 .load_binary = load_elf_binary,
39892 .load_shlib = load_elf_library,
39893 .core_dump = elf_core_dump,
39894 +
39895 +#ifdef CONFIG_PAX_MPROTECT
39896 + .handle_mprotect= elf_handle_mprotect,
39897 +#endif
39898 +
39899 .min_coredump = ELF_EXEC_PAGESIZE,
39900 };
39901
39902 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39903
39904 static int set_brk(unsigned long start, unsigned long end)
39905 {
39906 + unsigned long e = end;
39907 +
39908 start = ELF_PAGEALIGN(start);
39909 end = ELF_PAGEALIGN(end);
39910 if (end > start) {
39911 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39912 if (BAD_ADDR(addr))
39913 return addr;
39914 }
39915 - current->mm->start_brk = current->mm->brk = end;
39916 + current->mm->start_brk = current->mm->brk = e;
39917 return 0;
39918 }
39919
39920 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39921 elf_addr_t __user *u_rand_bytes;
39922 const char *k_platform = ELF_PLATFORM;
39923 const char *k_base_platform = ELF_BASE_PLATFORM;
39924 - unsigned char k_rand_bytes[16];
39925 + u32 k_rand_bytes[4];
39926 int items;
39927 elf_addr_t *elf_info;
39928 int ei_index = 0;
39929 const struct cred *cred = current_cred();
39930 struct vm_area_struct *vma;
39931 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39932
39933 /*
39934 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39935 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39936 * Generate 16 random bytes for userspace PRNG seeding.
39937 */
39938 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39939 - u_rand_bytes = (elf_addr_t __user *)
39940 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39941 + srandom32(k_rand_bytes[0] ^ random32());
39942 + srandom32(k_rand_bytes[1] ^ random32());
39943 + srandom32(k_rand_bytes[2] ^ random32());
39944 + srandom32(k_rand_bytes[3] ^ random32());
39945 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39946 + u_rand_bytes = (elf_addr_t __user *) p;
39947 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39948 return -EFAULT;
39949
39950 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39951 return -EFAULT;
39952 current->mm->env_end = p;
39953
39954 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39955 +
39956 /* Put the elf_info on the stack in the right place. */
39957 sp = (elf_addr_t __user *)envp + 1;
39958 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39959 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39960 return -EFAULT;
39961 return 0;
39962 }
39963 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39964 {
39965 struct elf_phdr *elf_phdata;
39966 struct elf_phdr *eppnt;
39967 - unsigned long load_addr = 0;
39968 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39969 int load_addr_set = 0;
39970 unsigned long last_bss = 0, elf_bss = 0;
39971 - unsigned long error = ~0UL;
39972 + unsigned long error = -EINVAL;
39973 unsigned long total_size;
39974 int retval, i, size;
39975
39976 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39977 goto out_close;
39978 }
39979
39980 +#ifdef CONFIG_PAX_SEGMEXEC
39981 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39982 + pax_task_size = SEGMEXEC_TASK_SIZE;
39983 +#endif
39984 +
39985 eppnt = elf_phdata;
39986 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39987 if (eppnt->p_type == PT_LOAD) {
39988 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39989 k = load_addr + eppnt->p_vaddr;
39990 if (BAD_ADDR(k) ||
39991 eppnt->p_filesz > eppnt->p_memsz ||
39992 - eppnt->p_memsz > TASK_SIZE ||
39993 - TASK_SIZE - eppnt->p_memsz < k) {
39994 + eppnt->p_memsz > pax_task_size ||
39995 + pax_task_size - eppnt->p_memsz < k) {
39996 error = -ENOMEM;
39997 goto out_close;
39998 }
39999 @@ -528,6 +552,351 @@ out:
40000 return error;
40001 }
40002
40003 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
40004 +{
40005 + unsigned long pax_flags = 0UL;
40006 +
40007 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40008 +
40009 +#ifdef CONFIG_PAX_PAGEEXEC
40010 + if (elf_phdata->p_flags & PF_PAGEEXEC)
40011 + pax_flags |= MF_PAX_PAGEEXEC;
40012 +#endif
40013 +
40014 +#ifdef CONFIG_PAX_SEGMEXEC
40015 + if (elf_phdata->p_flags & PF_SEGMEXEC)
40016 + pax_flags |= MF_PAX_SEGMEXEC;
40017 +#endif
40018 +
40019 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40020 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40021 + if ((__supported_pte_mask & _PAGE_NX))
40022 + pax_flags &= ~MF_PAX_SEGMEXEC;
40023 + else
40024 + pax_flags &= ~MF_PAX_PAGEEXEC;
40025 + }
40026 +#endif
40027 +
40028 +#ifdef CONFIG_PAX_EMUTRAMP
40029 + if (elf_phdata->p_flags & PF_EMUTRAMP)
40030 + pax_flags |= MF_PAX_EMUTRAMP;
40031 +#endif
40032 +
40033 +#ifdef CONFIG_PAX_MPROTECT
40034 + if (elf_phdata->p_flags & PF_MPROTECT)
40035 + pax_flags |= MF_PAX_MPROTECT;
40036 +#endif
40037 +
40038 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40039 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
40040 + pax_flags |= MF_PAX_RANDMMAP;
40041 +#endif
40042 +
40043 +#endif
40044 +
40045 + return pax_flags;
40046 +}
40047 +
40048 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
40049 +{
40050 + unsigned long pax_flags = 0UL;
40051 +
40052 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40053 +
40054 +#ifdef CONFIG_PAX_PAGEEXEC
40055 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
40056 + pax_flags |= MF_PAX_PAGEEXEC;
40057 +#endif
40058 +
40059 +#ifdef CONFIG_PAX_SEGMEXEC
40060 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
40061 + pax_flags |= MF_PAX_SEGMEXEC;
40062 +#endif
40063 +
40064 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40065 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40066 + if ((__supported_pte_mask & _PAGE_NX))
40067 + pax_flags &= ~MF_PAX_SEGMEXEC;
40068 + else
40069 + pax_flags &= ~MF_PAX_PAGEEXEC;
40070 + }
40071 +#endif
40072 +
40073 +#ifdef CONFIG_PAX_EMUTRAMP
40074 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
40075 + pax_flags |= MF_PAX_EMUTRAMP;
40076 +#endif
40077 +
40078 +#ifdef CONFIG_PAX_MPROTECT
40079 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
40080 + pax_flags |= MF_PAX_MPROTECT;
40081 +#endif
40082 +
40083 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40084 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
40085 + pax_flags |= MF_PAX_RANDMMAP;
40086 +#endif
40087 +
40088 +#endif
40089 +
40090 + return pax_flags;
40091 +}
40092 +
40093 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
40094 +{
40095 + unsigned long pax_flags = 0UL;
40096 +
40097 +#ifdef CONFIG_PAX_EI_PAX
40098 +
40099 +#ifdef CONFIG_PAX_PAGEEXEC
40100 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
40101 + pax_flags |= MF_PAX_PAGEEXEC;
40102 +#endif
40103 +
40104 +#ifdef CONFIG_PAX_SEGMEXEC
40105 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
40106 + pax_flags |= MF_PAX_SEGMEXEC;
40107 +#endif
40108 +
40109 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40110 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40111 + if ((__supported_pte_mask & _PAGE_NX))
40112 + pax_flags &= ~MF_PAX_SEGMEXEC;
40113 + else
40114 + pax_flags &= ~MF_PAX_PAGEEXEC;
40115 + }
40116 +#endif
40117 +
40118 +#ifdef CONFIG_PAX_EMUTRAMP
40119 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40120 + pax_flags |= MF_PAX_EMUTRAMP;
40121 +#endif
40122 +
40123 +#ifdef CONFIG_PAX_MPROTECT
40124 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40125 + pax_flags |= MF_PAX_MPROTECT;
40126 +#endif
40127 +
40128 +#ifdef CONFIG_PAX_ASLR
40129 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40130 + pax_flags |= MF_PAX_RANDMMAP;
40131 +#endif
40132 +
40133 +#else
40134 +
40135 +#ifdef CONFIG_PAX_PAGEEXEC
40136 + pax_flags |= MF_PAX_PAGEEXEC;
40137 +#endif
40138 +
40139 +#ifdef CONFIG_PAX_MPROTECT
40140 + pax_flags |= MF_PAX_MPROTECT;
40141 +#endif
40142 +
40143 +#ifdef CONFIG_PAX_RANDMMAP
40144 + pax_flags |= MF_PAX_RANDMMAP;
40145 +#endif
40146 +
40147 +#ifdef CONFIG_PAX_SEGMEXEC
40148 + if (!(__supported_pte_mask & _PAGE_NX)) {
40149 + pax_flags &= ~MF_PAX_PAGEEXEC;
40150 + pax_flags |= MF_PAX_SEGMEXEC;
40151 + }
40152 +#endif
40153 +
40154 +#endif
40155 +
40156 + return pax_flags;
40157 +}
40158 +
40159 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40160 +{
40161 +
40162 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40163 + unsigned long i;
40164 +
40165 + for (i = 0UL; i < elf_ex->e_phnum; i++)
40166 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40167 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40168 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40169 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40170 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40171 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40172 + return ~0UL;
40173 +
40174 +#ifdef CONFIG_PAX_SOFTMODE
40175 + if (pax_softmode)
40176 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
40177 + else
40178 +#endif
40179 +
40180 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
40181 + break;
40182 + }
40183 +#endif
40184 +
40185 + return ~0UL;
40186 +}
40187 +
40188 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40189 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
40190 +{
40191 + unsigned long pax_flags = 0UL;
40192 +
40193 +#ifdef CONFIG_PAX_PAGEEXEC
40194 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
40195 + pax_flags |= MF_PAX_PAGEEXEC;
40196 +#endif
40197 +
40198 +#ifdef CONFIG_PAX_SEGMEXEC
40199 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
40200 + pax_flags |= MF_PAX_SEGMEXEC;
40201 +#endif
40202 +
40203 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40204 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40205 + if ((__supported_pte_mask & _PAGE_NX))
40206 + pax_flags &= ~MF_PAX_SEGMEXEC;
40207 + else
40208 + pax_flags &= ~MF_PAX_PAGEEXEC;
40209 + }
40210 +#endif
40211 +
40212 +#ifdef CONFIG_PAX_EMUTRAMP
40213 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40214 + pax_flags |= MF_PAX_EMUTRAMP;
40215 +#endif
40216 +
40217 +#ifdef CONFIG_PAX_MPROTECT
40218 + if (pax_flags_softmode & MF_PAX_MPROTECT)
40219 + pax_flags |= MF_PAX_MPROTECT;
40220 +#endif
40221 +
40222 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40223 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40224 + pax_flags |= MF_PAX_RANDMMAP;
40225 +#endif
40226 +
40227 + return pax_flags;
40228 +}
40229 +
40230 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40231 +{
40232 + unsigned long pax_flags = 0UL;
40233 +
40234 +#ifdef CONFIG_PAX_PAGEEXEC
40235 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40236 + pax_flags |= MF_PAX_PAGEEXEC;
40237 +#endif
40238 +
40239 +#ifdef CONFIG_PAX_SEGMEXEC
40240 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40241 + pax_flags |= MF_PAX_SEGMEXEC;
40242 +#endif
40243 +
40244 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40245 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40246 + if ((__supported_pte_mask & _PAGE_NX))
40247 + pax_flags &= ~MF_PAX_SEGMEXEC;
40248 + else
40249 + pax_flags &= ~MF_PAX_PAGEEXEC;
40250 + }
40251 +#endif
40252 +
40253 +#ifdef CONFIG_PAX_EMUTRAMP
40254 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40255 + pax_flags |= MF_PAX_EMUTRAMP;
40256 +#endif
40257 +
40258 +#ifdef CONFIG_PAX_MPROTECT
40259 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40260 + pax_flags |= MF_PAX_MPROTECT;
40261 +#endif
40262 +
40263 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40264 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40265 + pax_flags |= MF_PAX_RANDMMAP;
40266 +#endif
40267 +
40268 + return pax_flags;
40269 +}
40270 +#endif
40271 +
40272 +static unsigned long pax_parse_xattr_pax(struct file * const file)
40273 +{
40274 +
40275 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40276 + ssize_t xattr_size, i;
40277 + unsigned char xattr_value[5];
40278 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40279 +
40280 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40281 + if (xattr_size <= 0)
40282 + return ~0UL;
40283 +
40284 + for (i = 0; i < xattr_size; i++)
40285 + switch (xattr_value[i]) {
40286 + default:
40287 + return ~0UL;
40288 +
40289 +#define parse_flag(option1, option2, flag) \
40290 + case option1: \
40291 + pax_flags_hardmode |= MF_PAX_##flag; \
40292 + break; \
40293 + case option2: \
40294 + pax_flags_softmode |= MF_PAX_##flag; \
40295 + break;
40296 +
40297 + parse_flag('p', 'P', PAGEEXEC);
40298 + parse_flag('e', 'E', EMUTRAMP);
40299 + parse_flag('m', 'M', MPROTECT);
40300 + parse_flag('r', 'R', RANDMMAP);
40301 + parse_flag('s', 'S', SEGMEXEC);
40302 +
40303 +#undef parse_flag
40304 + }
40305 +
40306 + if (pax_flags_hardmode & pax_flags_softmode)
40307 + return ~0UL;
40308 +
40309 +#ifdef CONFIG_PAX_SOFTMODE
40310 + if (pax_softmode)
40311 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40312 + else
40313 +#endif
40314 +
40315 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40316 +#else
40317 + return ~0UL;
40318 +#endif
40319 +
40320 +}
40321 +
40322 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40323 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40324 +{
40325 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40326 +
40327 + pax_flags = pax_parse_ei_pax(elf_ex);
40328 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40329 + xattr_pax_flags = pax_parse_xattr_pax(file);
40330 +
40331 + if (pt_pax_flags == ~0UL)
40332 + pt_pax_flags = xattr_pax_flags;
40333 + else if (xattr_pax_flags == ~0UL)
40334 + xattr_pax_flags = pt_pax_flags;
40335 + if (pt_pax_flags != xattr_pax_flags)
40336 + return -EINVAL;
40337 + if (pt_pax_flags != ~0UL)
40338 + pax_flags = pt_pax_flags;
40339 +
40340 + if (0 > pax_check_flags(&pax_flags))
40341 + return -EINVAL;
40342 +
40343 + current->mm->pax_flags = pax_flags;
40344 + return 0;
40345 +}
40346 +#endif
40347 +
40348 /*
40349 * These are the functions used to load ELF style executables and shared
40350 * libraries. There is no binary dependent code anywhere else.
40351 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40352 {
40353 unsigned int random_variable = 0;
40354
40355 +#ifdef CONFIG_PAX_RANDUSTACK
40356 + if (randomize_va_space)
40357 + return stack_top - current->mm->delta_stack;
40358 +#endif
40359 +
40360 if ((current->flags & PF_RANDOMIZE) &&
40361 !(current->personality & ADDR_NO_RANDOMIZE)) {
40362 random_variable = get_random_int() & STACK_RND_MASK;
40363 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40364 unsigned long load_addr = 0, load_bias = 0;
40365 int load_addr_set = 0;
40366 char * elf_interpreter = NULL;
40367 - unsigned long error;
40368 + unsigned long error = 0;
40369 struct elf_phdr *elf_ppnt, *elf_phdata;
40370 unsigned long elf_bss, elf_brk;
40371 int retval, i;
40372 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40373 unsigned long start_code, end_code, start_data, end_data;
40374 unsigned long reloc_func_desc __maybe_unused = 0;
40375 int executable_stack = EXSTACK_DEFAULT;
40376 - unsigned long def_flags = 0;
40377 struct {
40378 struct elfhdr elf_ex;
40379 struct elfhdr interp_elf_ex;
40380 } *loc;
40381 + unsigned long pax_task_size = TASK_SIZE;
40382
40383 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40384 if (!loc) {
40385 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40386
40387 /* OK, This is the point of no return */
40388 current->flags &= ~PF_FORKNOEXEC;
40389 - current->mm->def_flags = def_flags;
40390 +
40391 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40392 + current->mm->pax_flags = 0UL;
40393 +#endif
40394 +
40395 +#ifdef CONFIG_PAX_DLRESOLVE
40396 + current->mm->call_dl_resolve = 0UL;
40397 +#endif
40398 +
40399 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40400 + current->mm->call_syscall = 0UL;
40401 +#endif
40402 +
40403 +#ifdef CONFIG_PAX_ASLR
40404 + current->mm->delta_mmap = 0UL;
40405 + current->mm->delta_stack = 0UL;
40406 +#endif
40407 +
40408 + current->mm->def_flags = 0;
40409 +
40410 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40411 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40412 + send_sig(SIGKILL, current, 0);
40413 + goto out_free_dentry;
40414 + }
40415 +#endif
40416 +
40417 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40418 + pax_set_initial_flags(bprm);
40419 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40420 + if (pax_set_initial_flags_func)
40421 + (pax_set_initial_flags_func)(bprm);
40422 +#endif
40423 +
40424 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40425 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40426 + current->mm->context.user_cs_limit = PAGE_SIZE;
40427 + current->mm->def_flags |= VM_PAGEEXEC;
40428 + }
40429 +#endif
40430 +
40431 +#ifdef CONFIG_PAX_SEGMEXEC
40432 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40433 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40434 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40435 + pax_task_size = SEGMEXEC_TASK_SIZE;
40436 + current->mm->def_flags |= VM_NOHUGEPAGE;
40437 + }
40438 +#endif
40439 +
40440 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40441 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40442 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40443 + put_cpu();
40444 + }
40445 +#endif
40446
40447 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40448 may depend on the personality. */
40449 SET_PERSONALITY(loc->elf_ex);
40450 +
40451 +#ifdef CONFIG_PAX_ASLR
40452 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40453 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40454 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40455 + }
40456 +#endif
40457 +
40458 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40459 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40460 + executable_stack = EXSTACK_DISABLE_X;
40461 + current->personality &= ~READ_IMPLIES_EXEC;
40462 + } else
40463 +#endif
40464 +
40465 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40466 current->personality |= READ_IMPLIES_EXEC;
40467
40468 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40469 #else
40470 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40471 #endif
40472 +
40473 +#ifdef CONFIG_PAX_RANDMMAP
40474 + /* PaX: randomize base address at the default exe base if requested */
40475 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40476 +#ifdef CONFIG_SPARC64
40477 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40478 +#else
40479 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40480 +#endif
40481 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40482 + elf_flags |= MAP_FIXED;
40483 + }
40484 +#endif
40485 +
40486 }
40487
40488 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40489 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40490 * allowed task size. Note that p_filesz must always be
40491 * <= p_memsz so it is only necessary to check p_memsz.
40492 */
40493 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40494 - elf_ppnt->p_memsz > TASK_SIZE ||
40495 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40496 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40497 + elf_ppnt->p_memsz > pax_task_size ||
40498 + pax_task_size - elf_ppnt->p_memsz < k) {
40499 /* set_brk can never work. Avoid overflows. */
40500 send_sig(SIGKILL, current, 0);
40501 retval = -EINVAL;
40502 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40503 start_data += load_bias;
40504 end_data += load_bias;
40505
40506 +#ifdef CONFIG_PAX_RANDMMAP
40507 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40508 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40509 +#endif
40510 +
40511 /* Calling set_brk effectively mmaps the pages that we need
40512 * for the bss and break sections. We must do this before
40513 * mapping in the interpreter, to make sure it doesn't wind
40514 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40515 goto out_free_dentry;
40516 }
40517 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40518 - send_sig(SIGSEGV, current, 0);
40519 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40520 - goto out_free_dentry;
40521 + /*
40522 + * This bss-zeroing can fail if the ELF
40523 + * file specifies odd protections. So
40524 + * we don't check the return value
40525 + */
40526 }
40527
40528 if (elf_interpreter) {
40529 @@ -1098,7 +1563,7 @@ out:
40530 * Decide what to dump of a segment, part, all or none.
40531 */
40532 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40533 - unsigned long mm_flags)
40534 + unsigned long mm_flags, long signr)
40535 {
40536 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40537
40538 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40539 if (vma->vm_file == NULL)
40540 return 0;
40541
40542 - if (FILTER(MAPPED_PRIVATE))
40543 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40544 goto whole;
40545
40546 /*
40547 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40548 {
40549 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40550 int i = 0;
40551 - do
40552 + do {
40553 i += 2;
40554 - while (auxv[i - 2] != AT_NULL);
40555 + } while (auxv[i - 2] != AT_NULL);
40556 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40557 }
40558
40559 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40560 }
40561
40562 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40563 - unsigned long mm_flags)
40564 + struct coredump_params *cprm)
40565 {
40566 struct vm_area_struct *vma;
40567 size_t size = 0;
40568
40569 for (vma = first_vma(current, gate_vma); vma != NULL;
40570 vma = next_vma(vma, gate_vma))
40571 - size += vma_dump_size(vma, mm_flags);
40572 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40573 return size;
40574 }
40575
40576 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40577
40578 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40579
40580 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40581 + offset += elf_core_vma_data_size(gate_vma, cprm);
40582 offset += elf_core_extra_data_size();
40583 e_shoff = offset;
40584
40585 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40586 offset = dataoff;
40587
40588 size += sizeof(*elf);
40589 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40590 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40591 goto end_coredump;
40592
40593 size += sizeof(*phdr4note);
40594 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40595 if (size > cprm->limit
40596 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40597 goto end_coredump;
40598 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40599 phdr.p_offset = offset;
40600 phdr.p_vaddr = vma->vm_start;
40601 phdr.p_paddr = 0;
40602 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40603 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40604 phdr.p_memsz = vma->vm_end - vma->vm_start;
40605 offset += phdr.p_filesz;
40606 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40607 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40608 phdr.p_align = ELF_EXEC_PAGESIZE;
40609
40610 size += sizeof(phdr);
40611 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40612 if (size > cprm->limit
40613 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40614 goto end_coredump;
40615 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40616 unsigned long addr;
40617 unsigned long end;
40618
40619 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40620 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40621
40622 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40623 struct page *page;
40624 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40625 page = get_dump_page(addr);
40626 if (page) {
40627 void *kaddr = kmap(page);
40628 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40629 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40630 !dump_write(cprm->file, kaddr,
40631 PAGE_SIZE);
40632 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40633
40634 if (e_phnum == PN_XNUM) {
40635 size += sizeof(*shdr4extnum);
40636 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40637 if (size > cprm->limit
40638 || !dump_write(cprm->file, shdr4extnum,
40639 sizeof(*shdr4extnum)))
40640 @@ -2075,6 +2545,97 @@ out:
40641
40642 #endif /* CONFIG_ELF_CORE */
40643
40644 +#ifdef CONFIG_PAX_MPROTECT
40645 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40646 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40647 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40648 + *
40649 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40650 + * basis because we want to allow the common case and not the special ones.
40651 + */
40652 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40653 +{
40654 + struct elfhdr elf_h;
40655 + struct elf_phdr elf_p;
40656 + unsigned long i;
40657 + unsigned long oldflags;
40658 + bool is_textrel_rw, is_textrel_rx, is_relro;
40659 +
40660 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40661 + return;
40662 +
40663 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40664 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40665 +
40666 +#ifdef CONFIG_PAX_ELFRELOCS
40667 + /* possible TEXTREL */
40668 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40669 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40670 +#else
40671 + is_textrel_rw = false;
40672 + is_textrel_rx = false;
40673 +#endif
40674 +
40675 + /* possible RELRO */
40676 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40677 +
40678 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40679 + return;
40680 +
40681 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40682 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40683 +
40684 +#ifdef CONFIG_PAX_ETEXECRELOCS
40685 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40686 +#else
40687 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40688 +#endif
40689 +
40690 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40691 + !elf_check_arch(&elf_h) ||
40692 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40693 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40694 + return;
40695 +
40696 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40697 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40698 + return;
40699 + switch (elf_p.p_type) {
40700 + case PT_DYNAMIC:
40701 + if (!is_textrel_rw && !is_textrel_rx)
40702 + continue;
40703 + i = 0UL;
40704 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40705 + elf_dyn dyn;
40706 +
40707 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40708 + return;
40709 + if (dyn.d_tag == DT_NULL)
40710 + return;
40711 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40712 + gr_log_textrel(vma);
40713 + if (is_textrel_rw)
40714 + vma->vm_flags |= VM_MAYWRITE;
40715 + else
40716 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40717 + vma->vm_flags &= ~VM_MAYWRITE;
40718 + return;
40719 + }
40720 + i++;
40721 + }
40722 + return;
40723 +
40724 + case PT_GNU_RELRO:
40725 + if (!is_relro)
40726 + continue;
40727 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40728 + vma->vm_flags &= ~VM_MAYWRITE;
40729 + return;
40730 + }
40731 + }
40732 +}
40733 +#endif
40734 +
40735 static int __init init_elf_binfmt(void)
40736 {
40737 return register_binfmt(&elf_format);
40738 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40739 index 1bffbe0..c8c283e 100644
40740 --- a/fs/binfmt_flat.c
40741 +++ b/fs/binfmt_flat.c
40742 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40743 realdatastart = (unsigned long) -ENOMEM;
40744 printk("Unable to allocate RAM for process data, errno %d\n",
40745 (int)-realdatastart);
40746 + down_write(&current->mm->mmap_sem);
40747 do_munmap(current->mm, textpos, text_len);
40748 + up_write(&current->mm->mmap_sem);
40749 ret = realdatastart;
40750 goto err;
40751 }
40752 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40753 }
40754 if (IS_ERR_VALUE(result)) {
40755 printk("Unable to read data+bss, errno %d\n", (int)-result);
40756 + down_write(&current->mm->mmap_sem);
40757 do_munmap(current->mm, textpos, text_len);
40758 do_munmap(current->mm, realdatastart, len);
40759 + up_write(&current->mm->mmap_sem);
40760 ret = result;
40761 goto err;
40762 }
40763 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40764 }
40765 if (IS_ERR_VALUE(result)) {
40766 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40767 + down_write(&current->mm->mmap_sem);
40768 do_munmap(current->mm, textpos, text_len + data_len + extra +
40769 MAX_SHARED_LIBS * sizeof(unsigned long));
40770 + up_write(&current->mm->mmap_sem);
40771 ret = result;
40772 goto err;
40773 }
40774 diff --git a/fs/bio.c b/fs/bio.c
40775 index b1fe82c..84da0a9 100644
40776 --- a/fs/bio.c
40777 +++ b/fs/bio.c
40778 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40779 const int read = bio_data_dir(bio) == READ;
40780 struct bio_map_data *bmd = bio->bi_private;
40781 int i;
40782 - char *p = bmd->sgvecs[0].iov_base;
40783 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40784
40785 __bio_for_each_segment(bvec, bio, i, 0) {
40786 char *addr = page_address(bvec->bv_page);
40787 diff --git a/fs/block_dev.c b/fs/block_dev.c
40788 index b07f1da..9efcb92 100644
40789 --- a/fs/block_dev.c
40790 +++ b/fs/block_dev.c
40791 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40792 else if (bdev->bd_contains == bdev)
40793 return true; /* is a whole device which isn't held */
40794
40795 - else if (whole->bd_holder == bd_may_claim)
40796 + else if (whole->bd_holder == (void *)bd_may_claim)
40797 return true; /* is a partition of a device that is being partitioned */
40798 else if (whole->bd_holder != NULL)
40799 return false; /* is a partition of a held device */
40800 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40801 index dede441..f2a2507 100644
40802 --- a/fs/btrfs/ctree.c
40803 +++ b/fs/btrfs/ctree.c
40804 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40805 free_extent_buffer(buf);
40806 add_root_to_dirty_list(root);
40807 } else {
40808 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40809 - parent_start = parent->start;
40810 - else
40811 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40812 + if (parent)
40813 + parent_start = parent->start;
40814 + else
40815 + parent_start = 0;
40816 + } else
40817 parent_start = 0;
40818
40819 WARN_ON(trans->transid != btrfs_header_generation(parent));
40820 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40821 index fd1a06d..6e9033d 100644
40822 --- a/fs/btrfs/inode.c
40823 +++ b/fs/btrfs/inode.c
40824 @@ -6895,7 +6895,7 @@ fail:
40825 return -ENOMEM;
40826 }
40827
40828 -static int btrfs_getattr(struct vfsmount *mnt,
40829 +int btrfs_getattr(struct vfsmount *mnt,
40830 struct dentry *dentry, struct kstat *stat)
40831 {
40832 struct inode *inode = dentry->d_inode;
40833 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40834 return 0;
40835 }
40836
40837 +EXPORT_SYMBOL(btrfs_getattr);
40838 +
40839 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40840 +{
40841 + return BTRFS_I(inode)->root->anon_dev;
40842 +}
40843 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40844 +
40845 /*
40846 * If a file is moved, it will inherit the cow and compression flags of the new
40847 * directory.
40848 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40849 index c04f02c..f5c9e2e 100644
40850 --- a/fs/btrfs/ioctl.c
40851 +++ b/fs/btrfs/ioctl.c
40852 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40853 for (i = 0; i < num_types; i++) {
40854 struct btrfs_space_info *tmp;
40855
40856 + /* Don't copy in more than we allocated */
40857 if (!slot_count)
40858 break;
40859
40860 + slot_count--;
40861 +
40862 info = NULL;
40863 rcu_read_lock();
40864 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40865 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40866 memcpy(dest, &space, sizeof(space));
40867 dest++;
40868 space_args.total_spaces++;
40869 - slot_count--;
40870 }
40871 - if (!slot_count)
40872 - break;
40873 }
40874 up_read(&info->groups_sem);
40875 }
40876
40877 - user_dest = (struct btrfs_ioctl_space_info *)
40878 + user_dest = (struct btrfs_ioctl_space_info __user *)
40879 (arg + sizeof(struct btrfs_ioctl_space_args));
40880
40881 if (copy_to_user(user_dest, dest_orig, alloc_size))
40882 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40883 index cfb5543..1ae7347 100644
40884 --- a/fs/btrfs/relocation.c
40885 +++ b/fs/btrfs/relocation.c
40886 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40887 }
40888 spin_unlock(&rc->reloc_root_tree.lock);
40889
40890 - BUG_ON((struct btrfs_root *)node->data != root);
40891 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40892
40893 if (!del) {
40894 spin_lock(&rc->reloc_root_tree.lock);
40895 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40896 index 622f469..e8d2d55 100644
40897 --- a/fs/cachefiles/bind.c
40898 +++ b/fs/cachefiles/bind.c
40899 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40900 args);
40901
40902 /* start by checking things over */
40903 - ASSERT(cache->fstop_percent >= 0 &&
40904 - cache->fstop_percent < cache->fcull_percent &&
40905 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40906 cache->fcull_percent < cache->frun_percent &&
40907 cache->frun_percent < 100);
40908
40909 - ASSERT(cache->bstop_percent >= 0 &&
40910 - cache->bstop_percent < cache->bcull_percent &&
40911 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40912 cache->bcull_percent < cache->brun_percent &&
40913 cache->brun_percent < 100);
40914
40915 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40916 index 0a1467b..6a53245 100644
40917 --- a/fs/cachefiles/daemon.c
40918 +++ b/fs/cachefiles/daemon.c
40919 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40920 if (n > buflen)
40921 return -EMSGSIZE;
40922
40923 - if (copy_to_user(_buffer, buffer, n) != 0)
40924 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40925 return -EFAULT;
40926
40927 return n;
40928 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40929 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40930 return -EIO;
40931
40932 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40933 + if (datalen > PAGE_SIZE - 1)
40934 return -EOPNOTSUPP;
40935
40936 /* drag the command string into the kernel so we can parse it */
40937 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40938 if (args[0] != '%' || args[1] != '\0')
40939 return -EINVAL;
40940
40941 - if (fstop < 0 || fstop >= cache->fcull_percent)
40942 + if (fstop >= cache->fcull_percent)
40943 return cachefiles_daemon_range_error(cache, args);
40944
40945 cache->fstop_percent = fstop;
40946 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40947 if (args[0] != '%' || args[1] != '\0')
40948 return -EINVAL;
40949
40950 - if (bstop < 0 || bstop >= cache->bcull_percent)
40951 + if (bstop >= cache->bcull_percent)
40952 return cachefiles_daemon_range_error(cache, args);
40953
40954 cache->bstop_percent = bstop;
40955 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40956 index bd6bc1b..b627b53 100644
40957 --- a/fs/cachefiles/internal.h
40958 +++ b/fs/cachefiles/internal.h
40959 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40960 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40961 struct rb_root active_nodes; /* active nodes (can't be culled) */
40962 rwlock_t active_lock; /* lock for active_nodes */
40963 - atomic_t gravecounter; /* graveyard uniquifier */
40964 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40965 unsigned frun_percent; /* when to stop culling (% files) */
40966 unsigned fcull_percent; /* when to start culling (% files) */
40967 unsigned fstop_percent; /* when to stop allocating (% files) */
40968 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40969 * proc.c
40970 */
40971 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40972 -extern atomic_t cachefiles_lookup_histogram[HZ];
40973 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40974 -extern atomic_t cachefiles_create_histogram[HZ];
40975 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40976 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40977 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40978
40979 extern int __init cachefiles_proc_init(void);
40980 extern void cachefiles_proc_cleanup(void);
40981 static inline
40982 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40983 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40984 {
40985 unsigned long jif = jiffies - start_jif;
40986 if (jif >= HZ)
40987 jif = HZ - 1;
40988 - atomic_inc(&histogram[jif]);
40989 + atomic_inc_unchecked(&histogram[jif]);
40990 }
40991
40992 #else
40993 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40994 index a0358c2..d6137f2 100644
40995 --- a/fs/cachefiles/namei.c
40996 +++ b/fs/cachefiles/namei.c
40997 @@ -318,7 +318,7 @@ try_again:
40998 /* first step is to make up a grave dentry in the graveyard */
40999 sprintf(nbuffer, "%08x%08x",
41000 (uint32_t) get_seconds(),
41001 - (uint32_t) atomic_inc_return(&cache->gravecounter));
41002 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
41003
41004 /* do the multiway lock magic */
41005 trap = lock_rename(cache->graveyard, dir);
41006 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
41007 index eccd339..4c1d995 100644
41008 --- a/fs/cachefiles/proc.c
41009 +++ b/fs/cachefiles/proc.c
41010 @@ -14,9 +14,9 @@
41011 #include <linux/seq_file.h>
41012 #include "internal.h"
41013
41014 -atomic_t cachefiles_lookup_histogram[HZ];
41015 -atomic_t cachefiles_mkdir_histogram[HZ];
41016 -atomic_t cachefiles_create_histogram[HZ];
41017 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41018 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41019 +atomic_unchecked_t cachefiles_create_histogram[HZ];
41020
41021 /*
41022 * display the latency histogram
41023 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
41024 return 0;
41025 default:
41026 index = (unsigned long) v - 3;
41027 - x = atomic_read(&cachefiles_lookup_histogram[index]);
41028 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
41029 - z = atomic_read(&cachefiles_create_histogram[index]);
41030 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
41031 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
41032 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
41033 if (x == 0 && y == 0 && z == 0)
41034 return 0;
41035
41036 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
41037 index 0e3c092..818480e 100644
41038 --- a/fs/cachefiles/rdwr.c
41039 +++ b/fs/cachefiles/rdwr.c
41040 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
41041 old_fs = get_fs();
41042 set_fs(KERNEL_DS);
41043 ret = file->f_op->write(
41044 - file, (const void __user *) data, len, &pos);
41045 + file, (const void __force_user *) data, len, &pos);
41046 set_fs(old_fs);
41047 kunmap(page);
41048 if (ret != len)
41049 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
41050 index 9895400..fa40a7d 100644
41051 --- a/fs/ceph/dir.c
41052 +++ b/fs/ceph/dir.c
41053 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
41054 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
41055 struct ceph_mds_client *mdsc = fsc->mdsc;
41056 unsigned frag = fpos_frag(filp->f_pos);
41057 - int off = fpos_off(filp->f_pos);
41058 + unsigned int off = fpos_off(filp->f_pos);
41059 int err;
41060 u32 ftype;
41061 struct ceph_mds_reply_info_parsed *rinfo;
41062 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
41063 index 84e8c07..6170d31 100644
41064 --- a/fs/cifs/cifs_debug.c
41065 +++ b/fs/cifs/cifs_debug.c
41066 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41067
41068 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
41069 #ifdef CONFIG_CIFS_STATS2
41070 - atomic_set(&totBufAllocCount, 0);
41071 - atomic_set(&totSmBufAllocCount, 0);
41072 + atomic_set_unchecked(&totBufAllocCount, 0);
41073 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41074 #endif /* CONFIG_CIFS_STATS2 */
41075 spin_lock(&cifs_tcp_ses_lock);
41076 list_for_each(tmp1, &cifs_tcp_ses_list) {
41077 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41078 tcon = list_entry(tmp3,
41079 struct cifs_tcon,
41080 tcon_list);
41081 - atomic_set(&tcon->num_smbs_sent, 0);
41082 - atomic_set(&tcon->num_writes, 0);
41083 - atomic_set(&tcon->num_reads, 0);
41084 - atomic_set(&tcon->num_oplock_brks, 0);
41085 - atomic_set(&tcon->num_opens, 0);
41086 - atomic_set(&tcon->num_posixopens, 0);
41087 - atomic_set(&tcon->num_posixmkdirs, 0);
41088 - atomic_set(&tcon->num_closes, 0);
41089 - atomic_set(&tcon->num_deletes, 0);
41090 - atomic_set(&tcon->num_mkdirs, 0);
41091 - atomic_set(&tcon->num_rmdirs, 0);
41092 - atomic_set(&tcon->num_renames, 0);
41093 - atomic_set(&tcon->num_t2renames, 0);
41094 - atomic_set(&tcon->num_ffirst, 0);
41095 - atomic_set(&tcon->num_fnext, 0);
41096 - atomic_set(&tcon->num_fclose, 0);
41097 - atomic_set(&tcon->num_hardlinks, 0);
41098 - atomic_set(&tcon->num_symlinks, 0);
41099 - atomic_set(&tcon->num_locks, 0);
41100 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
41101 + atomic_set_unchecked(&tcon->num_writes, 0);
41102 + atomic_set_unchecked(&tcon->num_reads, 0);
41103 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
41104 + atomic_set_unchecked(&tcon->num_opens, 0);
41105 + atomic_set_unchecked(&tcon->num_posixopens, 0);
41106 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
41107 + atomic_set_unchecked(&tcon->num_closes, 0);
41108 + atomic_set_unchecked(&tcon->num_deletes, 0);
41109 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
41110 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
41111 + atomic_set_unchecked(&tcon->num_renames, 0);
41112 + atomic_set_unchecked(&tcon->num_t2renames, 0);
41113 + atomic_set_unchecked(&tcon->num_ffirst, 0);
41114 + atomic_set_unchecked(&tcon->num_fnext, 0);
41115 + atomic_set_unchecked(&tcon->num_fclose, 0);
41116 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
41117 + atomic_set_unchecked(&tcon->num_symlinks, 0);
41118 + atomic_set_unchecked(&tcon->num_locks, 0);
41119 }
41120 }
41121 }
41122 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41123 smBufAllocCount.counter, cifs_min_small);
41124 #ifdef CONFIG_CIFS_STATS2
41125 seq_printf(m, "Total Large %d Small %d Allocations\n",
41126 - atomic_read(&totBufAllocCount),
41127 - atomic_read(&totSmBufAllocCount));
41128 + atomic_read_unchecked(&totBufAllocCount),
41129 + atomic_read_unchecked(&totSmBufAllocCount));
41130 #endif /* CONFIG_CIFS_STATS2 */
41131
41132 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
41133 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41134 if (tcon->need_reconnect)
41135 seq_puts(m, "\tDISCONNECTED ");
41136 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
41137 - atomic_read(&tcon->num_smbs_sent),
41138 - atomic_read(&tcon->num_oplock_brks));
41139 + atomic_read_unchecked(&tcon->num_smbs_sent),
41140 + atomic_read_unchecked(&tcon->num_oplock_brks));
41141 seq_printf(m, "\nReads: %d Bytes: %lld",
41142 - atomic_read(&tcon->num_reads),
41143 + atomic_read_unchecked(&tcon->num_reads),
41144 (long long)(tcon->bytes_read));
41145 seq_printf(m, "\nWrites: %d Bytes: %lld",
41146 - atomic_read(&tcon->num_writes),
41147 + atomic_read_unchecked(&tcon->num_writes),
41148 (long long)(tcon->bytes_written));
41149 seq_printf(m, "\nFlushes: %d",
41150 - atomic_read(&tcon->num_flushes));
41151 + atomic_read_unchecked(&tcon->num_flushes));
41152 seq_printf(m, "\nLocks: %d HardLinks: %d "
41153 "Symlinks: %d",
41154 - atomic_read(&tcon->num_locks),
41155 - atomic_read(&tcon->num_hardlinks),
41156 - atomic_read(&tcon->num_symlinks));
41157 + atomic_read_unchecked(&tcon->num_locks),
41158 + atomic_read_unchecked(&tcon->num_hardlinks),
41159 + atomic_read_unchecked(&tcon->num_symlinks));
41160 seq_printf(m, "\nOpens: %d Closes: %d "
41161 "Deletes: %d",
41162 - atomic_read(&tcon->num_opens),
41163 - atomic_read(&tcon->num_closes),
41164 - atomic_read(&tcon->num_deletes));
41165 + atomic_read_unchecked(&tcon->num_opens),
41166 + atomic_read_unchecked(&tcon->num_closes),
41167 + atomic_read_unchecked(&tcon->num_deletes));
41168 seq_printf(m, "\nPosix Opens: %d "
41169 "Posix Mkdirs: %d",
41170 - atomic_read(&tcon->num_posixopens),
41171 - atomic_read(&tcon->num_posixmkdirs));
41172 + atomic_read_unchecked(&tcon->num_posixopens),
41173 + atomic_read_unchecked(&tcon->num_posixmkdirs));
41174 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
41175 - atomic_read(&tcon->num_mkdirs),
41176 - atomic_read(&tcon->num_rmdirs));
41177 + atomic_read_unchecked(&tcon->num_mkdirs),
41178 + atomic_read_unchecked(&tcon->num_rmdirs));
41179 seq_printf(m, "\nRenames: %d T2 Renames %d",
41180 - atomic_read(&tcon->num_renames),
41181 - atomic_read(&tcon->num_t2renames));
41182 + atomic_read_unchecked(&tcon->num_renames),
41183 + atomic_read_unchecked(&tcon->num_t2renames));
41184 seq_printf(m, "\nFindFirst: %d FNext %d "
41185 "FClose %d",
41186 - atomic_read(&tcon->num_ffirst),
41187 - atomic_read(&tcon->num_fnext),
41188 - atomic_read(&tcon->num_fclose));
41189 + atomic_read_unchecked(&tcon->num_ffirst),
41190 + atomic_read_unchecked(&tcon->num_fnext),
41191 + atomic_read_unchecked(&tcon->num_fclose));
41192 }
41193 }
41194 }
41195 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
41196 index 8f1fe32..38f9e27 100644
41197 --- a/fs/cifs/cifsfs.c
41198 +++ b/fs/cifs/cifsfs.c
41199 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
41200 cifs_req_cachep = kmem_cache_create("cifs_request",
41201 CIFSMaxBufSize +
41202 MAX_CIFS_HDR_SIZE, 0,
41203 - SLAB_HWCACHE_ALIGN, NULL);
41204 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41205 if (cifs_req_cachep == NULL)
41206 return -ENOMEM;
41207
41208 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41209 efficient to alloc 1 per page off the slab compared to 17K (5page)
41210 alloc of large cifs buffers even when page debugging is on */
41211 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41212 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41213 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41214 NULL);
41215 if (cifs_sm_req_cachep == NULL) {
41216 mempool_destroy(cifs_req_poolp);
41217 @@ -1101,8 +1101,8 @@ init_cifs(void)
41218 atomic_set(&bufAllocCount, 0);
41219 atomic_set(&smBufAllocCount, 0);
41220 #ifdef CONFIG_CIFS_STATS2
41221 - atomic_set(&totBufAllocCount, 0);
41222 - atomic_set(&totSmBufAllocCount, 0);
41223 + atomic_set_unchecked(&totBufAllocCount, 0);
41224 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41225 #endif /* CONFIG_CIFS_STATS2 */
41226
41227 atomic_set(&midCount, 0);
41228 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41229 index 8238aa1..0347196 100644
41230 --- a/fs/cifs/cifsglob.h
41231 +++ b/fs/cifs/cifsglob.h
41232 @@ -392,28 +392,28 @@ struct cifs_tcon {
41233 __u16 Flags; /* optional support bits */
41234 enum statusEnum tidStatus;
41235 #ifdef CONFIG_CIFS_STATS
41236 - atomic_t num_smbs_sent;
41237 - atomic_t num_writes;
41238 - atomic_t num_reads;
41239 - atomic_t num_flushes;
41240 - atomic_t num_oplock_brks;
41241 - atomic_t num_opens;
41242 - atomic_t num_closes;
41243 - atomic_t num_deletes;
41244 - atomic_t num_mkdirs;
41245 - atomic_t num_posixopens;
41246 - atomic_t num_posixmkdirs;
41247 - atomic_t num_rmdirs;
41248 - atomic_t num_renames;
41249 - atomic_t num_t2renames;
41250 - atomic_t num_ffirst;
41251 - atomic_t num_fnext;
41252 - atomic_t num_fclose;
41253 - atomic_t num_hardlinks;
41254 - atomic_t num_symlinks;
41255 - atomic_t num_locks;
41256 - atomic_t num_acl_get;
41257 - atomic_t num_acl_set;
41258 + atomic_unchecked_t num_smbs_sent;
41259 + atomic_unchecked_t num_writes;
41260 + atomic_unchecked_t num_reads;
41261 + atomic_unchecked_t num_flushes;
41262 + atomic_unchecked_t num_oplock_brks;
41263 + atomic_unchecked_t num_opens;
41264 + atomic_unchecked_t num_closes;
41265 + atomic_unchecked_t num_deletes;
41266 + atomic_unchecked_t num_mkdirs;
41267 + atomic_unchecked_t num_posixopens;
41268 + atomic_unchecked_t num_posixmkdirs;
41269 + atomic_unchecked_t num_rmdirs;
41270 + atomic_unchecked_t num_renames;
41271 + atomic_unchecked_t num_t2renames;
41272 + atomic_unchecked_t num_ffirst;
41273 + atomic_unchecked_t num_fnext;
41274 + atomic_unchecked_t num_fclose;
41275 + atomic_unchecked_t num_hardlinks;
41276 + atomic_unchecked_t num_symlinks;
41277 + atomic_unchecked_t num_locks;
41278 + atomic_unchecked_t num_acl_get;
41279 + atomic_unchecked_t num_acl_set;
41280 #ifdef CONFIG_CIFS_STATS2
41281 unsigned long long time_writes;
41282 unsigned long long time_reads;
41283 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41284 }
41285
41286 #ifdef CONFIG_CIFS_STATS
41287 -#define cifs_stats_inc atomic_inc
41288 +#define cifs_stats_inc atomic_inc_unchecked
41289
41290 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41291 unsigned int bytes)
41292 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41293 /* Various Debug counters */
41294 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41295 #ifdef CONFIG_CIFS_STATS2
41296 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41297 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41298 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41299 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41300 #endif
41301 GLOBAL_EXTERN atomic_t smBufAllocCount;
41302 GLOBAL_EXTERN atomic_t midCount;
41303 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41304 index 6b0e064..94e6c3c 100644
41305 --- a/fs/cifs/link.c
41306 +++ b/fs/cifs/link.c
41307 @@ -600,7 +600,7 @@ symlink_exit:
41308
41309 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41310 {
41311 - char *p = nd_get_link(nd);
41312 + const char *p = nd_get_link(nd);
41313 if (!IS_ERR(p))
41314 kfree(p);
41315 }
41316 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41317 index 703ef5c..2a44ed5 100644
41318 --- a/fs/cifs/misc.c
41319 +++ b/fs/cifs/misc.c
41320 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41321 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41322 atomic_inc(&bufAllocCount);
41323 #ifdef CONFIG_CIFS_STATS2
41324 - atomic_inc(&totBufAllocCount);
41325 + atomic_inc_unchecked(&totBufAllocCount);
41326 #endif /* CONFIG_CIFS_STATS2 */
41327 }
41328
41329 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41330 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41331 atomic_inc(&smBufAllocCount);
41332 #ifdef CONFIG_CIFS_STATS2
41333 - atomic_inc(&totSmBufAllocCount);
41334 + atomic_inc_unchecked(&totSmBufAllocCount);
41335 #endif /* CONFIG_CIFS_STATS2 */
41336
41337 }
41338 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41339 index 6901578..d402eb5 100644
41340 --- a/fs/coda/cache.c
41341 +++ b/fs/coda/cache.c
41342 @@ -24,7 +24,7 @@
41343 #include "coda_linux.h"
41344 #include "coda_cache.h"
41345
41346 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41347 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41348
41349 /* replace or extend an acl cache hit */
41350 void coda_cache_enter(struct inode *inode, int mask)
41351 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41352 struct coda_inode_info *cii = ITOC(inode);
41353
41354 spin_lock(&cii->c_lock);
41355 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41356 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41357 if (cii->c_uid != current_fsuid()) {
41358 cii->c_uid = current_fsuid();
41359 cii->c_cached_perm = mask;
41360 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41361 {
41362 struct coda_inode_info *cii = ITOC(inode);
41363 spin_lock(&cii->c_lock);
41364 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41365 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41366 spin_unlock(&cii->c_lock);
41367 }
41368
41369 /* remove all acl caches */
41370 void coda_cache_clear_all(struct super_block *sb)
41371 {
41372 - atomic_inc(&permission_epoch);
41373 + atomic_inc_unchecked(&permission_epoch);
41374 }
41375
41376
41377 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41378 spin_lock(&cii->c_lock);
41379 hit = (mask & cii->c_cached_perm) == mask &&
41380 cii->c_uid == current_fsuid() &&
41381 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41382 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41383 spin_unlock(&cii->c_lock);
41384
41385 return hit;
41386 diff --git a/fs/compat.c b/fs/compat.c
41387 index c987875..08771ca 100644
41388 --- a/fs/compat.c
41389 +++ b/fs/compat.c
41390 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41391 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41392 {
41393 compat_ino_t ino = stat->ino;
41394 - typeof(ubuf->st_uid) uid = 0;
41395 - typeof(ubuf->st_gid) gid = 0;
41396 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41397 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41398 int err;
41399
41400 SET_UID(uid, stat->uid);
41401 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41402
41403 set_fs(KERNEL_DS);
41404 /* The __user pointer cast is valid because of the set_fs() */
41405 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41406 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41407 set_fs(oldfs);
41408 /* truncating is ok because it's a user address */
41409 if (!ret)
41410 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41411 goto out;
41412
41413 ret = -EINVAL;
41414 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41415 + if (nr_segs > UIO_MAXIOV)
41416 goto out;
41417 if (nr_segs > fast_segs) {
41418 ret = -ENOMEM;
41419 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41420
41421 struct compat_readdir_callback {
41422 struct compat_old_linux_dirent __user *dirent;
41423 + struct file * file;
41424 int result;
41425 };
41426
41427 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41428 buf->result = -EOVERFLOW;
41429 return -EOVERFLOW;
41430 }
41431 +
41432 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41433 + return 0;
41434 +
41435 buf->result++;
41436 dirent = buf->dirent;
41437 if (!access_ok(VERIFY_WRITE, dirent,
41438 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41439
41440 buf.result = 0;
41441 buf.dirent = dirent;
41442 + buf.file = file;
41443
41444 error = vfs_readdir(file, compat_fillonedir, &buf);
41445 if (buf.result)
41446 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41447 struct compat_getdents_callback {
41448 struct compat_linux_dirent __user *current_dir;
41449 struct compat_linux_dirent __user *previous;
41450 + struct file * file;
41451 int count;
41452 int error;
41453 };
41454 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41455 buf->error = -EOVERFLOW;
41456 return -EOVERFLOW;
41457 }
41458 +
41459 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41460 + return 0;
41461 +
41462 dirent = buf->previous;
41463 if (dirent) {
41464 if (__put_user(offset, &dirent->d_off))
41465 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41466 buf.previous = NULL;
41467 buf.count = count;
41468 buf.error = 0;
41469 + buf.file = file;
41470
41471 error = vfs_readdir(file, compat_filldir, &buf);
41472 if (error >= 0)
41473 @@ -1003,6 +1015,7 @@ out:
41474 struct compat_getdents_callback64 {
41475 struct linux_dirent64 __user *current_dir;
41476 struct linux_dirent64 __user *previous;
41477 + struct file * file;
41478 int count;
41479 int error;
41480 };
41481 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41482 buf->error = -EINVAL; /* only used if we fail.. */
41483 if (reclen > buf->count)
41484 return -EINVAL;
41485 +
41486 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41487 + return 0;
41488 +
41489 dirent = buf->previous;
41490
41491 if (dirent) {
41492 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41493 buf.previous = NULL;
41494 buf.count = count;
41495 buf.error = 0;
41496 + buf.file = file;
41497
41498 error = vfs_readdir(file, compat_filldir64, &buf);
41499 if (error >= 0)
41500 error = buf.error;
41501 lastdirent = buf.previous;
41502 if (lastdirent) {
41503 - typeof(lastdirent->d_off) d_off = file->f_pos;
41504 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41505 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41506 error = -EFAULT;
41507 else
41508 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41509 index 112e45a..b59845b 100644
41510 --- a/fs/compat_binfmt_elf.c
41511 +++ b/fs/compat_binfmt_elf.c
41512 @@ -30,11 +30,13 @@
41513 #undef elf_phdr
41514 #undef elf_shdr
41515 #undef elf_note
41516 +#undef elf_dyn
41517 #undef elf_addr_t
41518 #define elfhdr elf32_hdr
41519 #define elf_phdr elf32_phdr
41520 #define elf_shdr elf32_shdr
41521 #define elf_note elf32_note
41522 +#define elf_dyn Elf32_Dyn
41523 #define elf_addr_t Elf32_Addr
41524
41525 /*
41526 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41527 index 51352de..93292ff 100644
41528 --- a/fs/compat_ioctl.c
41529 +++ b/fs/compat_ioctl.c
41530 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41531
41532 err = get_user(palp, &up->palette);
41533 err |= get_user(length, &up->length);
41534 + if (err)
41535 + return -EFAULT;
41536
41537 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41538 err = put_user(compat_ptr(palp), &up_native->palette);
41539 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41540 return -EFAULT;
41541 if (__get_user(udata, &ss32->iomem_base))
41542 return -EFAULT;
41543 - ss.iomem_base = compat_ptr(udata);
41544 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41545 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41546 __get_user(ss.port_high, &ss32->port_high))
41547 return -EFAULT;
41548 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41549 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41550 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41551 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41552 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41553 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41554 return -EFAULT;
41555
41556 return ioctl_preallocate(file, p);
41557 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41558 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41559 {
41560 unsigned int a, b;
41561 - a = *(unsigned int *)p;
41562 - b = *(unsigned int *)q;
41563 + a = *(const unsigned int *)p;
41564 + b = *(const unsigned int *)q;
41565 if (a > b)
41566 return 1;
41567 if (a < b)
41568 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41569 index 9a37a9b..35792b6 100644
41570 --- a/fs/configfs/dir.c
41571 +++ b/fs/configfs/dir.c
41572 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41573 }
41574 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41575 struct configfs_dirent *next;
41576 - const char * name;
41577 + const unsigned char * name;
41578 + char d_name[sizeof(next->s_dentry->d_iname)];
41579 int len;
41580 struct inode *inode = NULL;
41581
41582 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41583 continue;
41584
41585 name = configfs_get_name(next);
41586 - len = strlen(name);
41587 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41588 + len = next->s_dentry->d_name.len;
41589 + memcpy(d_name, name, len);
41590 + name = d_name;
41591 + } else
41592 + len = strlen(name);
41593
41594 /*
41595 * We'll have a dentry and an inode for
41596 diff --git a/fs/dcache.c b/fs/dcache.c
41597 index f7908ae..920a680 100644
41598 --- a/fs/dcache.c
41599 +++ b/fs/dcache.c
41600 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41601 mempages -= reserve;
41602
41603 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41604 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41605 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41606
41607 dcache_init();
41608 inode_init();
41609 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41610 index f3a257d..715ac0f 100644
41611 --- a/fs/debugfs/inode.c
41612 +++ b/fs/debugfs/inode.c
41613 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41614 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41615 {
41616 return debugfs_create_file(name,
41617 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41618 + S_IFDIR | S_IRWXU,
41619 +#else
41620 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41621 +#endif
41622 parent, NULL, NULL);
41623 }
41624 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41625 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41626 index d2039ca..a766407 100644
41627 --- a/fs/ecryptfs/inode.c
41628 +++ b/fs/ecryptfs/inode.c
41629 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41630 old_fs = get_fs();
41631 set_fs(get_ds());
41632 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41633 - (char __user *)lower_buf,
41634 + (char __force_user *)lower_buf,
41635 lower_bufsiz);
41636 set_fs(old_fs);
41637 if (rc < 0)
41638 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41639 }
41640 old_fs = get_fs();
41641 set_fs(get_ds());
41642 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41643 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41644 set_fs(old_fs);
41645 if (rc < 0) {
41646 kfree(buf);
41647 @@ -752,7 +752,7 @@ out:
41648 static void
41649 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41650 {
41651 - char *buf = nd_get_link(nd);
41652 + const char *buf = nd_get_link(nd);
41653 if (!IS_ERR(buf)) {
41654 /* Free the char* */
41655 kfree(buf);
41656 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41657 index 0dc5a3d..d3cdeea 100644
41658 --- a/fs/ecryptfs/miscdev.c
41659 +++ b/fs/ecryptfs/miscdev.c
41660 @@ -328,7 +328,7 @@ check_list:
41661 goto out_unlock_msg_ctx;
41662 i = 5;
41663 if (msg_ctx->msg) {
41664 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41665 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41666 goto out_unlock_msg_ctx;
41667 i += packet_length_size;
41668 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41669 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41670 index 608c1c3..7d040a8 100644
41671 --- a/fs/ecryptfs/read_write.c
41672 +++ b/fs/ecryptfs/read_write.c
41673 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41674 return -EIO;
41675 fs_save = get_fs();
41676 set_fs(get_ds());
41677 - rc = vfs_write(lower_file, data, size, &offset);
41678 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41679 set_fs(fs_save);
41680 mark_inode_dirty_sync(ecryptfs_inode);
41681 return rc;
41682 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41683 return -EIO;
41684 fs_save = get_fs();
41685 set_fs(get_ds());
41686 - rc = vfs_read(lower_file, data, size, &offset);
41687 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41688 set_fs(fs_save);
41689 return rc;
41690 }
41691 diff --git a/fs/exec.c b/fs/exec.c
41692 index 3625464..04855f9 100644
41693 --- a/fs/exec.c
41694 +++ b/fs/exec.c
41695 @@ -55,12 +55,28 @@
41696 #include <linux/pipe_fs_i.h>
41697 #include <linux/oom.h>
41698 #include <linux/compat.h>
41699 +#include <linux/random.h>
41700 +#include <linux/seq_file.h>
41701 +
41702 +#ifdef CONFIG_PAX_REFCOUNT
41703 +#include <linux/kallsyms.h>
41704 +#include <linux/kdebug.h>
41705 +#endif
41706
41707 #include <asm/uaccess.h>
41708 #include <asm/mmu_context.h>
41709 #include <asm/tlb.h>
41710 #include "internal.h"
41711
41712 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41713 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41714 +#endif
41715 +
41716 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41717 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41718 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41719 +#endif
41720 +
41721 int core_uses_pid;
41722 char core_pattern[CORENAME_MAX_SIZE] = "core";
41723 unsigned int core_pipe_limit;
41724 @@ -70,7 +86,7 @@ struct core_name {
41725 char *corename;
41726 int used, size;
41727 };
41728 -static atomic_t call_count = ATOMIC_INIT(1);
41729 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41730
41731 /* The maximal length of core_pattern is also specified in sysctl.c */
41732
41733 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41734 int write)
41735 {
41736 struct page *page;
41737 - int ret;
41738
41739 -#ifdef CONFIG_STACK_GROWSUP
41740 - if (write) {
41741 - ret = expand_downwards(bprm->vma, pos);
41742 - if (ret < 0)
41743 - return NULL;
41744 - }
41745 -#endif
41746 - ret = get_user_pages(current, bprm->mm, pos,
41747 - 1, write, 1, &page, NULL);
41748 - if (ret <= 0)
41749 + if (0 > expand_downwards(bprm->vma, pos))
41750 + return NULL;
41751 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41752 return NULL;
41753
41754 if (write) {
41755 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41756 if (size <= ARG_MAX)
41757 return page;
41758
41759 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41760 + // only allow 1MB for argv+env on suid/sgid binaries
41761 + // to prevent easy ASLR exhaustion
41762 + if (((bprm->cred->euid != current_euid()) ||
41763 + (bprm->cred->egid != current_egid())) &&
41764 + (size > (1024 * 1024))) {
41765 + put_page(page);
41766 + return NULL;
41767 + }
41768 +#endif
41769 +
41770 /*
41771 * Limit to 1/4-th the stack size for the argv+env strings.
41772 * This ensures that:
41773 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41774 vma->vm_end = STACK_TOP_MAX;
41775 vma->vm_start = vma->vm_end - PAGE_SIZE;
41776 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41777 +
41778 +#ifdef CONFIG_PAX_SEGMEXEC
41779 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41780 +#endif
41781 +
41782 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41783 INIT_LIST_HEAD(&vma->anon_vma_chain);
41784
41785 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41786 mm->stack_vm = mm->total_vm = 1;
41787 up_write(&mm->mmap_sem);
41788 bprm->p = vma->vm_end - sizeof(void *);
41789 +
41790 +#ifdef CONFIG_PAX_RANDUSTACK
41791 + if (randomize_va_space)
41792 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41793 +#endif
41794 +
41795 return 0;
41796 err:
41797 up_write(&mm->mmap_sem);
41798 @@ -396,19 +426,7 @@ err:
41799 return err;
41800 }
41801
41802 -struct user_arg_ptr {
41803 -#ifdef CONFIG_COMPAT
41804 - bool is_compat;
41805 -#endif
41806 - union {
41807 - const char __user *const __user *native;
41808 -#ifdef CONFIG_COMPAT
41809 - compat_uptr_t __user *compat;
41810 -#endif
41811 - } ptr;
41812 -};
41813 -
41814 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41815 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41816 {
41817 const char __user *native;
41818
41819 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41820 compat_uptr_t compat;
41821
41822 if (get_user(compat, argv.ptr.compat + nr))
41823 - return ERR_PTR(-EFAULT);
41824 + return (const char __force_user *)ERR_PTR(-EFAULT);
41825
41826 return compat_ptr(compat);
41827 }
41828 #endif
41829
41830 if (get_user(native, argv.ptr.native + nr))
41831 - return ERR_PTR(-EFAULT);
41832 + return (const char __force_user *)ERR_PTR(-EFAULT);
41833
41834 return native;
41835 }
41836 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41837 if (!p)
41838 break;
41839
41840 - if (IS_ERR(p))
41841 + if (IS_ERR((const char __force_kernel *)p))
41842 return -EFAULT;
41843
41844 if (i++ >= max)
41845 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41846
41847 ret = -EFAULT;
41848 str = get_user_arg_ptr(argv, argc);
41849 - if (IS_ERR(str))
41850 + if (IS_ERR((const char __force_kernel *)str))
41851 goto out;
41852
41853 len = strnlen_user(str, MAX_ARG_STRLEN);
41854 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41855 int r;
41856 mm_segment_t oldfs = get_fs();
41857 struct user_arg_ptr argv = {
41858 - .ptr.native = (const char __user *const __user *)__argv,
41859 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41860 };
41861
41862 set_fs(KERNEL_DS);
41863 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41864 unsigned long new_end = old_end - shift;
41865 struct mmu_gather tlb;
41866
41867 - BUG_ON(new_start > new_end);
41868 + if (new_start >= new_end || new_start < mmap_min_addr)
41869 + return -ENOMEM;
41870
41871 /*
41872 * ensure there are no vmas between where we want to go
41873 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41874 if (vma != find_vma(mm, new_start))
41875 return -EFAULT;
41876
41877 +#ifdef CONFIG_PAX_SEGMEXEC
41878 + BUG_ON(pax_find_mirror_vma(vma));
41879 +#endif
41880 +
41881 /*
41882 * cover the whole range: [new_start, old_end)
41883 */
41884 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41885 stack_top = arch_align_stack(stack_top);
41886 stack_top = PAGE_ALIGN(stack_top);
41887
41888 - if (unlikely(stack_top < mmap_min_addr) ||
41889 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41890 - return -ENOMEM;
41891 -
41892 stack_shift = vma->vm_end - stack_top;
41893
41894 bprm->p -= stack_shift;
41895 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41896 bprm->exec -= stack_shift;
41897
41898 down_write(&mm->mmap_sem);
41899 +
41900 + /* Move stack pages down in memory. */
41901 + if (stack_shift) {
41902 + ret = shift_arg_pages(vma, stack_shift);
41903 + if (ret)
41904 + goto out_unlock;
41905 + }
41906 +
41907 vm_flags = VM_STACK_FLAGS;
41908
41909 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41910 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41911 + vm_flags &= ~VM_EXEC;
41912 +
41913 +#ifdef CONFIG_PAX_MPROTECT
41914 + if (mm->pax_flags & MF_PAX_MPROTECT)
41915 + vm_flags &= ~VM_MAYEXEC;
41916 +#endif
41917 +
41918 + }
41919 +#endif
41920 +
41921 /*
41922 * Adjust stack execute permissions; explicitly enable for
41923 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41924 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41925 goto out_unlock;
41926 BUG_ON(prev != vma);
41927
41928 - /* Move stack pages down in memory. */
41929 - if (stack_shift) {
41930 - ret = shift_arg_pages(vma, stack_shift);
41931 - if (ret)
41932 - goto out_unlock;
41933 - }
41934 -
41935 /* mprotect_fixup is overkill to remove the temporary stack flags */
41936 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41937
41938 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41939 old_fs = get_fs();
41940 set_fs(get_ds());
41941 /* The cast to a user pointer is valid due to the set_fs() */
41942 - result = vfs_read(file, (void __user *)addr, count, &pos);
41943 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41944 set_fs(old_fs);
41945 return result;
41946 }
41947 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41948 perf_event_comm(tsk);
41949 }
41950
41951 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41952 +{
41953 + int i, ch;
41954 +
41955 + /* Copies the binary name from after last slash */
41956 + for (i = 0; (ch = *(fn++)) != '\0';) {
41957 + if (ch == '/')
41958 + i = 0; /* overwrite what we wrote */
41959 + else
41960 + if (i < len - 1)
41961 + tcomm[i++] = ch;
41962 + }
41963 + tcomm[i] = '\0';
41964 +}
41965 +
41966 int flush_old_exec(struct linux_binprm * bprm)
41967 {
41968 int retval;
41969 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41970
41971 set_mm_exe_file(bprm->mm, bprm->file);
41972
41973 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41974 /*
41975 * Release all of the old mmap stuff
41976 */
41977 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41978
41979 void setup_new_exec(struct linux_binprm * bprm)
41980 {
41981 - int i, ch;
41982 - const char *name;
41983 - char tcomm[sizeof(current->comm)];
41984 -
41985 arch_pick_mmap_layout(current->mm);
41986
41987 /* This is the point of no return */
41988 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41989 else
41990 set_dumpable(current->mm, suid_dumpable);
41991
41992 - name = bprm->filename;
41993 -
41994 - /* Copies the binary name from after last slash */
41995 - for (i=0; (ch = *(name++)) != '\0';) {
41996 - if (ch == '/')
41997 - i = 0; /* overwrite what we wrote */
41998 - else
41999 - if (i < (sizeof(tcomm) - 1))
42000 - tcomm[i++] = ch;
42001 - }
42002 - tcomm[i] = '\0';
42003 - set_task_comm(current, tcomm);
42004 + set_task_comm(current, bprm->tcomm);
42005
42006 /* Set the new mm task size. We have to do that late because it may
42007 * depend on TIF_32BIT which is only updated in flush_thread() on
42008 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
42009 }
42010 rcu_read_unlock();
42011
42012 - if (p->fs->users > n_fs) {
42013 + if (atomic_read(&p->fs->users) > n_fs) {
42014 bprm->unsafe |= LSM_UNSAFE_SHARE;
42015 } else {
42016 res = -EAGAIN;
42017 @@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
42018
42019 EXPORT_SYMBOL(search_binary_handler);
42020
42021 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42022 +static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
42023 +#endif
42024 +
42025 /*
42026 * sys_execve() executes a new program.
42027 */
42028 @@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
42029 struct user_arg_ptr envp,
42030 struct pt_regs *regs)
42031 {
42032 +#ifdef CONFIG_GRKERNSEC
42033 + struct file *old_exec_file;
42034 + struct acl_subject_label *old_acl;
42035 + struct rlimit old_rlim[RLIM_NLIMITS];
42036 +#endif
42037 struct linux_binprm *bprm;
42038 struct file *file;
42039 struct files_struct *displaced;
42040 @@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
42041 int retval;
42042 const struct cred *cred = current_cred();
42043
42044 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42045 +
42046 /*
42047 * We move the actual failure in case of RLIMIT_NPROC excess from
42048 * set*uid() to execve() because too many poorly written programs
42049 @@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
42050 if (IS_ERR(file))
42051 goto out_unmark;
42052
42053 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
42054 + retval = -EPERM;
42055 + goto out_file;
42056 + }
42057 +
42058 sched_exec();
42059
42060 bprm->file = file;
42061 bprm->filename = filename;
42062 bprm->interp = filename;
42063
42064 + if (gr_process_user_ban()) {
42065 + retval = -EPERM;
42066 + goto out_file;
42067 + }
42068 +
42069 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42070 + retval = -EACCES;
42071 + goto out_file;
42072 + }
42073 +
42074 retval = bprm_mm_init(bprm);
42075 if (retval)
42076 goto out_file;
42077 @@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
42078 if (retval < 0)
42079 goto out;
42080
42081 + if (!gr_tpe_allow(file)) {
42082 + retval = -EACCES;
42083 + goto out;
42084 + }
42085 +
42086 + if (gr_check_crash_exec(file)) {
42087 + retval = -EACCES;
42088 + goto out;
42089 + }
42090 +
42091 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42092 +
42093 + gr_handle_exec_args(bprm, argv);
42094 +
42095 +#ifdef CONFIG_GRKERNSEC
42096 + old_acl = current->acl;
42097 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42098 + old_exec_file = current->exec_file;
42099 + get_file(file);
42100 + current->exec_file = file;
42101 +#endif
42102 +
42103 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42104 + bprm->unsafe);
42105 + if (retval < 0)
42106 + goto out_fail;
42107 +
42108 retval = search_binary_handler(bprm,regs);
42109 if (retval < 0)
42110 - goto out;
42111 + goto out_fail;
42112 +#ifdef CONFIG_GRKERNSEC
42113 + if (old_exec_file)
42114 + fput(old_exec_file);
42115 +#endif
42116
42117 /* execve succeeded */
42118 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42119 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
42120 +#endif
42121 +
42122 current->fs->in_exec = 0;
42123 current->in_execve = 0;
42124 acct_update_integrals(current);
42125 @@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
42126 put_files_struct(displaced);
42127 return retval;
42128
42129 +out_fail:
42130 +#ifdef CONFIG_GRKERNSEC
42131 + current->acl = old_acl;
42132 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42133 + fput(current->exec_file);
42134 + current->exec_file = old_exec_file;
42135 +#endif
42136 +
42137 out:
42138 if (bprm->mm) {
42139 acct_arg_size(bprm, 0);
42140 @@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
42141 {
42142 char *old_corename = cn->corename;
42143
42144 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42145 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42146 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42147
42148 if (!cn->corename) {
42149 @@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
42150 int pid_in_pattern = 0;
42151 int err = 0;
42152
42153 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42154 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42155 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42156 cn->used = 0;
42157
42158 @@ -1812,6 +1914,218 @@ out:
42159 return ispipe;
42160 }
42161
42162 +int pax_check_flags(unsigned long *flags)
42163 +{
42164 + int retval = 0;
42165 +
42166 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42167 + if (*flags & MF_PAX_SEGMEXEC)
42168 + {
42169 + *flags &= ~MF_PAX_SEGMEXEC;
42170 + retval = -EINVAL;
42171 + }
42172 +#endif
42173 +
42174 + if ((*flags & MF_PAX_PAGEEXEC)
42175 +
42176 +#ifdef CONFIG_PAX_PAGEEXEC
42177 + && (*flags & MF_PAX_SEGMEXEC)
42178 +#endif
42179 +
42180 + )
42181 + {
42182 + *flags &= ~MF_PAX_PAGEEXEC;
42183 + retval = -EINVAL;
42184 + }
42185 +
42186 + if ((*flags & MF_PAX_MPROTECT)
42187 +
42188 +#ifdef CONFIG_PAX_MPROTECT
42189 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42190 +#endif
42191 +
42192 + )
42193 + {
42194 + *flags &= ~MF_PAX_MPROTECT;
42195 + retval = -EINVAL;
42196 + }
42197 +
42198 + if ((*flags & MF_PAX_EMUTRAMP)
42199 +
42200 +#ifdef CONFIG_PAX_EMUTRAMP
42201 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42202 +#endif
42203 +
42204 + )
42205 + {
42206 + *flags &= ~MF_PAX_EMUTRAMP;
42207 + retval = -EINVAL;
42208 + }
42209 +
42210 + return retval;
42211 +}
42212 +
42213 +EXPORT_SYMBOL(pax_check_flags);
42214 +
42215 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42216 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42217 +{
42218 + struct task_struct *tsk = current;
42219 + struct mm_struct *mm = current->mm;
42220 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42221 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42222 + char *path_exec = NULL;
42223 + char *path_fault = NULL;
42224 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
42225 +
42226 + if (buffer_exec && buffer_fault) {
42227 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42228 +
42229 + down_read(&mm->mmap_sem);
42230 + vma = mm->mmap;
42231 + while (vma && (!vma_exec || !vma_fault)) {
42232 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42233 + vma_exec = vma;
42234 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42235 + vma_fault = vma;
42236 + vma = vma->vm_next;
42237 + }
42238 + if (vma_exec) {
42239 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42240 + if (IS_ERR(path_exec))
42241 + path_exec = "<path too long>";
42242 + else {
42243 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42244 + if (path_exec) {
42245 + *path_exec = 0;
42246 + path_exec = buffer_exec;
42247 + } else
42248 + path_exec = "<path too long>";
42249 + }
42250 + }
42251 + if (vma_fault) {
42252 + start = vma_fault->vm_start;
42253 + end = vma_fault->vm_end;
42254 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42255 + if (vma_fault->vm_file) {
42256 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42257 + if (IS_ERR(path_fault))
42258 + path_fault = "<path too long>";
42259 + else {
42260 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42261 + if (path_fault) {
42262 + *path_fault = 0;
42263 + path_fault = buffer_fault;
42264 + } else
42265 + path_fault = "<path too long>";
42266 + }
42267 + } else
42268 + path_fault = "<anonymous mapping>";
42269 + }
42270 + up_read(&mm->mmap_sem);
42271 + }
42272 + if (tsk->signal->curr_ip)
42273 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42274 + else
42275 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42276 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42277 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42278 + task_uid(tsk), task_euid(tsk), pc, sp);
42279 + free_page((unsigned long)buffer_exec);
42280 + free_page((unsigned long)buffer_fault);
42281 + pax_report_insns(regs, pc, sp);
42282 + do_coredump(SIGKILL, SIGKILL, regs);
42283 +}
42284 +#endif
42285 +
42286 +#ifdef CONFIG_PAX_REFCOUNT
42287 +void pax_report_refcount_overflow(struct pt_regs *regs)
42288 +{
42289 + if (current->signal->curr_ip)
42290 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42291 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42292 + else
42293 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42294 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42295 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42296 + show_regs(regs);
42297 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42298 +}
42299 +#endif
42300 +
42301 +#ifdef CONFIG_PAX_USERCOPY
42302 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42303 +int object_is_on_stack(const void *obj, unsigned long len)
42304 +{
42305 + const void * const stack = task_stack_page(current);
42306 + const void * const stackend = stack + THREAD_SIZE;
42307 +
42308 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42309 + const void *frame = NULL;
42310 + const void *oldframe;
42311 +#endif
42312 +
42313 + if (obj + len < obj)
42314 + return -1;
42315 +
42316 + if (obj + len <= stack || stackend <= obj)
42317 + return 0;
42318 +
42319 + if (obj < stack || stackend < obj + len)
42320 + return -1;
42321 +
42322 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42323 + oldframe = __builtin_frame_address(1);
42324 + if (oldframe)
42325 + frame = __builtin_frame_address(2);
42326 + /*
42327 + low ----------------------------------------------> high
42328 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
42329 + ^----------------^
42330 + allow copies only within here
42331 + */
42332 + while (stack <= frame && frame < stackend) {
42333 + /* if obj + len extends past the last frame, this
42334 + check won't pass and the next frame will be 0,
42335 + causing us to bail out and correctly report
42336 + the copy as invalid
42337 + */
42338 + if (obj + len <= frame)
42339 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42340 + oldframe = frame;
42341 + frame = *(const void * const *)frame;
42342 + }
42343 + return -1;
42344 +#else
42345 + return 1;
42346 +#endif
42347 +}
42348 +
42349 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42350 +{
42351 + if (current->signal->curr_ip)
42352 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42353 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42354 + else
42355 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42356 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42357 + dump_stack();
42358 + gr_handle_kernel_exploit();
42359 + do_group_exit(SIGKILL);
42360 +}
42361 +#endif
42362 +
42363 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42364 +void pax_track_stack(void)
42365 +{
42366 + unsigned long sp = (unsigned long)&sp;
42367 + if (sp < current_thread_info()->lowest_stack &&
42368 + sp > (unsigned long)task_stack_page(current))
42369 + current_thread_info()->lowest_stack = sp;
42370 +}
42371 +EXPORT_SYMBOL(pax_track_stack);
42372 +#endif
42373 +
42374 static int zap_process(struct task_struct *start, int exit_code)
42375 {
42376 struct task_struct *t;
42377 @@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42378 pipe = file->f_path.dentry->d_inode->i_pipe;
42379
42380 pipe_lock(pipe);
42381 - pipe->readers++;
42382 - pipe->writers--;
42383 + atomic_inc(&pipe->readers);
42384 + atomic_dec(&pipe->writers);
42385
42386 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42387 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42388 wake_up_interruptible_sync(&pipe->wait);
42389 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42390 pipe_wait(pipe);
42391 }
42392
42393 - pipe->readers--;
42394 - pipe->writers++;
42395 + atomic_dec(&pipe->readers);
42396 + atomic_inc(&pipe->writers);
42397 pipe_unlock(pipe);
42398
42399 }
42400 @@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42401 int retval = 0;
42402 int flag = 0;
42403 int ispipe;
42404 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42405 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42406 struct coredump_params cprm = {
42407 .signr = signr,
42408 .regs = regs,
42409 @@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42410
42411 audit_core_dumps(signr);
42412
42413 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42414 + gr_handle_brute_attach(current, cprm.mm_flags);
42415 +
42416 binfmt = mm->binfmt;
42417 if (!binfmt || !binfmt->core_dump)
42418 goto fail;
42419 @@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42420 }
42421 cprm.limit = RLIM_INFINITY;
42422
42423 - dump_count = atomic_inc_return(&core_dump_count);
42424 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42425 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42426 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42427 task_tgid_vnr(current), current->comm);
42428 @@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42429 } else {
42430 struct inode *inode;
42431
42432 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42433 +
42434 if (cprm.limit < binfmt->min_coredump)
42435 goto fail_unlock;
42436
42437 @@ -2246,7 +2565,7 @@ close_fail:
42438 filp_close(cprm.file, NULL);
42439 fail_dropcount:
42440 if (ispipe)
42441 - atomic_dec(&core_dump_count);
42442 + atomic_dec_unchecked(&core_dump_count);
42443 fail_unlock:
42444 kfree(cn.corename);
42445 fail_corename:
42446 @@ -2265,7 +2584,7 @@ fail:
42447 */
42448 int dump_write(struct file *file, const void *addr, int nr)
42449 {
42450 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42451 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42452 }
42453 EXPORT_SYMBOL(dump_write);
42454
42455 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42456 index a8cbe1b..fed04cb 100644
42457 --- a/fs/ext2/balloc.c
42458 +++ b/fs/ext2/balloc.c
42459 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42460
42461 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42462 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42463 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42464 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42465 sbi->s_resuid != current_fsuid() &&
42466 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42467 return 0;
42468 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42469 index a203892..4e64db5 100644
42470 --- a/fs/ext3/balloc.c
42471 +++ b/fs/ext3/balloc.c
42472 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42473
42474 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42475 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42476 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42477 + if (free_blocks < root_blocks + 1 &&
42478 !use_reservation && sbi->s_resuid != current_fsuid() &&
42479 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42480 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42481 + !capable_nolog(CAP_SYS_RESOURCE)) {
42482 return 0;
42483 }
42484 return 1;
42485 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42486 index 12ccacd..a6035fce0 100644
42487 --- a/fs/ext4/balloc.c
42488 +++ b/fs/ext4/balloc.c
42489 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42490 /* Hm, nope. Are (enough) root reserved clusters available? */
42491 if (sbi->s_resuid == current_fsuid() ||
42492 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42493 - capable(CAP_SYS_RESOURCE) ||
42494 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42495 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42496 + capable_nolog(CAP_SYS_RESOURCE)) {
42497
42498 if (free_clusters >= (nclusters + dirty_clusters))
42499 return 1;
42500 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42501 index 5b0e26a..0aa002d 100644
42502 --- a/fs/ext4/ext4.h
42503 +++ b/fs/ext4/ext4.h
42504 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42505 unsigned long s_mb_last_start;
42506
42507 /* stats for buddy allocator */
42508 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42509 - atomic_t s_bal_success; /* we found long enough chunks */
42510 - atomic_t s_bal_allocated; /* in blocks */
42511 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42512 - atomic_t s_bal_goals; /* goal hits */
42513 - atomic_t s_bal_breaks; /* too long searches */
42514 - atomic_t s_bal_2orders; /* 2^order hits */
42515 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42516 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42517 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42518 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42519 + atomic_unchecked_t s_bal_goals; /* goal hits */
42520 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42521 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42522 spinlock_t s_bal_lock;
42523 unsigned long s_mb_buddies_generated;
42524 unsigned long long s_mb_generation_time;
42525 - atomic_t s_mb_lost_chunks;
42526 - atomic_t s_mb_preallocated;
42527 - atomic_t s_mb_discarded;
42528 + atomic_unchecked_t s_mb_lost_chunks;
42529 + atomic_unchecked_t s_mb_preallocated;
42530 + atomic_unchecked_t s_mb_discarded;
42531 atomic_t s_lock_busy;
42532
42533 /* locality groups */
42534 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42535 index e2d8be8..c7f0ce9 100644
42536 --- a/fs/ext4/mballoc.c
42537 +++ b/fs/ext4/mballoc.c
42538 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42539 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42540
42541 if (EXT4_SB(sb)->s_mb_stats)
42542 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42543 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42544
42545 break;
42546 }
42547 @@ -2088,7 +2088,7 @@ repeat:
42548 ac->ac_status = AC_STATUS_CONTINUE;
42549 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42550 cr = 3;
42551 - atomic_inc(&sbi->s_mb_lost_chunks);
42552 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42553 goto repeat;
42554 }
42555 }
42556 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42557 if (sbi->s_mb_stats) {
42558 ext4_msg(sb, KERN_INFO,
42559 "mballoc: %u blocks %u reqs (%u success)",
42560 - atomic_read(&sbi->s_bal_allocated),
42561 - atomic_read(&sbi->s_bal_reqs),
42562 - atomic_read(&sbi->s_bal_success));
42563 + atomic_read_unchecked(&sbi->s_bal_allocated),
42564 + atomic_read_unchecked(&sbi->s_bal_reqs),
42565 + atomic_read_unchecked(&sbi->s_bal_success));
42566 ext4_msg(sb, KERN_INFO,
42567 "mballoc: %u extents scanned, %u goal hits, "
42568 "%u 2^N hits, %u breaks, %u lost",
42569 - atomic_read(&sbi->s_bal_ex_scanned),
42570 - atomic_read(&sbi->s_bal_goals),
42571 - atomic_read(&sbi->s_bal_2orders),
42572 - atomic_read(&sbi->s_bal_breaks),
42573 - atomic_read(&sbi->s_mb_lost_chunks));
42574 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42575 + atomic_read_unchecked(&sbi->s_bal_goals),
42576 + atomic_read_unchecked(&sbi->s_bal_2orders),
42577 + atomic_read_unchecked(&sbi->s_bal_breaks),
42578 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42579 ext4_msg(sb, KERN_INFO,
42580 "mballoc: %lu generated and it took %Lu",
42581 sbi->s_mb_buddies_generated,
42582 sbi->s_mb_generation_time);
42583 ext4_msg(sb, KERN_INFO,
42584 "mballoc: %u preallocated, %u discarded",
42585 - atomic_read(&sbi->s_mb_preallocated),
42586 - atomic_read(&sbi->s_mb_discarded));
42587 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42588 + atomic_read_unchecked(&sbi->s_mb_discarded));
42589 }
42590
42591 free_percpu(sbi->s_locality_groups);
42592 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42593 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42594
42595 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42596 - atomic_inc(&sbi->s_bal_reqs);
42597 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42598 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42599 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42600 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42601 - atomic_inc(&sbi->s_bal_success);
42602 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42603 + atomic_inc_unchecked(&sbi->s_bal_success);
42604 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42605 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42606 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42607 - atomic_inc(&sbi->s_bal_goals);
42608 + atomic_inc_unchecked(&sbi->s_bal_goals);
42609 if (ac->ac_found > sbi->s_mb_max_to_scan)
42610 - atomic_inc(&sbi->s_bal_breaks);
42611 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42612 }
42613
42614 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42615 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42616 trace_ext4_mb_new_inode_pa(ac, pa);
42617
42618 ext4_mb_use_inode_pa(ac, pa);
42619 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42620 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42621
42622 ei = EXT4_I(ac->ac_inode);
42623 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42624 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42625 trace_ext4_mb_new_group_pa(ac, pa);
42626
42627 ext4_mb_use_group_pa(ac, pa);
42628 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42629 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42630
42631 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42632 lg = ac->ac_lg;
42633 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42634 * from the bitmap and continue.
42635 */
42636 }
42637 - atomic_add(free, &sbi->s_mb_discarded);
42638 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42639
42640 return err;
42641 }
42642 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42643 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42644 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42645 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42646 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42647 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42648 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42649
42650 return 0;
42651 diff --git a/fs/fcntl.c b/fs/fcntl.c
42652 index 22764c7..86372c9 100644
42653 --- a/fs/fcntl.c
42654 +++ b/fs/fcntl.c
42655 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42656 if (err)
42657 return err;
42658
42659 + if (gr_handle_chroot_fowner(pid, type))
42660 + return -ENOENT;
42661 + if (gr_check_protected_task_fowner(pid, type))
42662 + return -EACCES;
42663 +
42664 f_modown(filp, pid, type, force);
42665 return 0;
42666 }
42667 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42668
42669 static int f_setown_ex(struct file *filp, unsigned long arg)
42670 {
42671 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42672 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42673 struct f_owner_ex owner;
42674 struct pid *pid;
42675 int type;
42676 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42677
42678 static int f_getown_ex(struct file *filp, unsigned long arg)
42679 {
42680 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42681 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42682 struct f_owner_ex owner;
42683 int ret = 0;
42684
42685 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42686 switch (cmd) {
42687 case F_DUPFD:
42688 case F_DUPFD_CLOEXEC:
42689 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42690 if (arg >= rlimit(RLIMIT_NOFILE))
42691 break;
42692 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42693 diff --git a/fs/fifo.c b/fs/fifo.c
42694 index b1a524d..4ee270e 100644
42695 --- a/fs/fifo.c
42696 +++ b/fs/fifo.c
42697 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42698 */
42699 filp->f_op = &read_pipefifo_fops;
42700 pipe->r_counter++;
42701 - if (pipe->readers++ == 0)
42702 + if (atomic_inc_return(&pipe->readers) == 1)
42703 wake_up_partner(inode);
42704
42705 - if (!pipe->writers) {
42706 + if (!atomic_read(&pipe->writers)) {
42707 if ((filp->f_flags & O_NONBLOCK)) {
42708 /* suppress POLLHUP until we have
42709 * seen a writer */
42710 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42711 * errno=ENXIO when there is no process reading the FIFO.
42712 */
42713 ret = -ENXIO;
42714 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42715 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42716 goto err;
42717
42718 filp->f_op = &write_pipefifo_fops;
42719 pipe->w_counter++;
42720 - if (!pipe->writers++)
42721 + if (atomic_inc_return(&pipe->writers) == 1)
42722 wake_up_partner(inode);
42723
42724 - if (!pipe->readers) {
42725 + if (!atomic_read(&pipe->readers)) {
42726 wait_for_partner(inode, &pipe->r_counter);
42727 if (signal_pending(current))
42728 goto err_wr;
42729 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42730 */
42731 filp->f_op = &rdwr_pipefifo_fops;
42732
42733 - pipe->readers++;
42734 - pipe->writers++;
42735 + atomic_inc(&pipe->readers);
42736 + atomic_inc(&pipe->writers);
42737 pipe->r_counter++;
42738 pipe->w_counter++;
42739 - if (pipe->readers == 1 || pipe->writers == 1)
42740 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42741 wake_up_partner(inode);
42742 break;
42743
42744 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42745 return 0;
42746
42747 err_rd:
42748 - if (!--pipe->readers)
42749 + if (atomic_dec_and_test(&pipe->readers))
42750 wake_up_interruptible(&pipe->wait);
42751 ret = -ERESTARTSYS;
42752 goto err;
42753
42754 err_wr:
42755 - if (!--pipe->writers)
42756 + if (atomic_dec_and_test(&pipe->writers))
42757 wake_up_interruptible(&pipe->wait);
42758 ret = -ERESTARTSYS;
42759 goto err;
42760
42761 err:
42762 - if (!pipe->readers && !pipe->writers)
42763 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42764 free_pipe_info(inode);
42765
42766 err_nocleanup:
42767 diff --git a/fs/file.c b/fs/file.c
42768 index 4c6992d..104cdea 100644
42769 --- a/fs/file.c
42770 +++ b/fs/file.c
42771 @@ -15,6 +15,7 @@
42772 #include <linux/slab.h>
42773 #include <linux/vmalloc.h>
42774 #include <linux/file.h>
42775 +#include <linux/security.h>
42776 #include <linux/fdtable.h>
42777 #include <linux/bitops.h>
42778 #include <linux/interrupt.h>
42779 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42780 * N.B. For clone tasks sharing a files structure, this test
42781 * will limit the total number of files that can be opened.
42782 */
42783 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42784 if (nr >= rlimit(RLIMIT_NOFILE))
42785 return -EMFILE;
42786
42787 diff --git a/fs/filesystems.c b/fs/filesystems.c
42788 index 0845f84..7b4ebef 100644
42789 --- a/fs/filesystems.c
42790 +++ b/fs/filesystems.c
42791 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42792 int len = dot ? dot - name : strlen(name);
42793
42794 fs = __get_fs_type(name, len);
42795 +
42796 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42797 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42798 +#else
42799 if (!fs && (request_module("%.*s", len, name) == 0))
42800 +#endif
42801 fs = __get_fs_type(name, len);
42802
42803 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42804 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42805 index 78b519c..a8b4979 100644
42806 --- a/fs/fs_struct.c
42807 +++ b/fs/fs_struct.c
42808 @@ -4,6 +4,7 @@
42809 #include <linux/path.h>
42810 #include <linux/slab.h>
42811 #include <linux/fs_struct.h>
42812 +#include <linux/grsecurity.h>
42813 #include "internal.h"
42814
42815 static inline void path_get_longterm(struct path *path)
42816 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42817 old_root = fs->root;
42818 fs->root = *path;
42819 path_get_longterm(path);
42820 + gr_set_chroot_entries(current, path);
42821 write_seqcount_end(&fs->seq);
42822 spin_unlock(&fs->lock);
42823 if (old_root.dentry)
42824 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42825 && fs->root.mnt == old_root->mnt) {
42826 path_get_longterm(new_root);
42827 fs->root = *new_root;
42828 + gr_set_chroot_entries(p, new_root);
42829 count++;
42830 }
42831 if (fs->pwd.dentry == old_root->dentry
42832 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42833 spin_lock(&fs->lock);
42834 write_seqcount_begin(&fs->seq);
42835 tsk->fs = NULL;
42836 - kill = !--fs->users;
42837 + gr_clear_chroot_entries(tsk);
42838 + kill = !atomic_dec_return(&fs->users);
42839 write_seqcount_end(&fs->seq);
42840 spin_unlock(&fs->lock);
42841 task_unlock(tsk);
42842 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42843 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42844 /* We don't need to lock fs - think why ;-) */
42845 if (fs) {
42846 - fs->users = 1;
42847 + atomic_set(&fs->users, 1);
42848 fs->in_exec = 0;
42849 spin_lock_init(&fs->lock);
42850 seqcount_init(&fs->seq);
42851 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42852 spin_lock(&old->lock);
42853 fs->root = old->root;
42854 path_get_longterm(&fs->root);
42855 + /* instead of calling gr_set_chroot_entries here,
42856 + we call it from every caller of this function
42857 + */
42858 fs->pwd = old->pwd;
42859 path_get_longterm(&fs->pwd);
42860 spin_unlock(&old->lock);
42861 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42862
42863 task_lock(current);
42864 spin_lock(&fs->lock);
42865 - kill = !--fs->users;
42866 + kill = !atomic_dec_return(&fs->users);
42867 current->fs = new_fs;
42868 + gr_set_chroot_entries(current, &new_fs->root);
42869 spin_unlock(&fs->lock);
42870 task_unlock(current);
42871
42872 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
42873
42874 int current_umask(void)
42875 {
42876 - return current->fs->umask;
42877 + return current->fs->umask | gr_acl_umask();
42878 }
42879 EXPORT_SYMBOL(current_umask);
42880
42881 /* to be mentioned only in INIT_TASK */
42882 struct fs_struct init_fs = {
42883 - .users = 1,
42884 + .users = ATOMIC_INIT(1),
42885 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42886 .seq = SEQCNT_ZERO,
42887 .umask = 0022,
42888 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42889 task_lock(current);
42890
42891 spin_lock(&init_fs.lock);
42892 - init_fs.users++;
42893 + atomic_inc(&init_fs.users);
42894 spin_unlock(&init_fs.lock);
42895
42896 spin_lock(&fs->lock);
42897 current->fs = &init_fs;
42898 - kill = !--fs->users;
42899 + gr_set_chroot_entries(current, &current->fs->root);
42900 + kill = !atomic_dec_return(&fs->users);
42901 spin_unlock(&fs->lock);
42902
42903 task_unlock(current);
42904 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42905 index 9905350..02eaec4 100644
42906 --- a/fs/fscache/cookie.c
42907 +++ b/fs/fscache/cookie.c
42908 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42909 parent ? (char *) parent->def->name : "<no-parent>",
42910 def->name, netfs_data);
42911
42912 - fscache_stat(&fscache_n_acquires);
42913 + fscache_stat_unchecked(&fscache_n_acquires);
42914
42915 /* if there's no parent cookie, then we don't create one here either */
42916 if (!parent) {
42917 - fscache_stat(&fscache_n_acquires_null);
42918 + fscache_stat_unchecked(&fscache_n_acquires_null);
42919 _leave(" [no parent]");
42920 return NULL;
42921 }
42922 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42923 /* allocate and initialise a cookie */
42924 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42925 if (!cookie) {
42926 - fscache_stat(&fscache_n_acquires_oom);
42927 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42928 _leave(" [ENOMEM]");
42929 return NULL;
42930 }
42931 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42932
42933 switch (cookie->def->type) {
42934 case FSCACHE_COOKIE_TYPE_INDEX:
42935 - fscache_stat(&fscache_n_cookie_index);
42936 + fscache_stat_unchecked(&fscache_n_cookie_index);
42937 break;
42938 case FSCACHE_COOKIE_TYPE_DATAFILE:
42939 - fscache_stat(&fscache_n_cookie_data);
42940 + fscache_stat_unchecked(&fscache_n_cookie_data);
42941 break;
42942 default:
42943 - fscache_stat(&fscache_n_cookie_special);
42944 + fscache_stat_unchecked(&fscache_n_cookie_special);
42945 break;
42946 }
42947
42948 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42949 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42950 atomic_dec(&parent->n_children);
42951 __fscache_cookie_put(cookie);
42952 - fscache_stat(&fscache_n_acquires_nobufs);
42953 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42954 _leave(" = NULL");
42955 return NULL;
42956 }
42957 }
42958
42959 - fscache_stat(&fscache_n_acquires_ok);
42960 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42961 _leave(" = %p", cookie);
42962 return cookie;
42963 }
42964 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42965 cache = fscache_select_cache_for_object(cookie->parent);
42966 if (!cache) {
42967 up_read(&fscache_addremove_sem);
42968 - fscache_stat(&fscache_n_acquires_no_cache);
42969 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42970 _leave(" = -ENOMEDIUM [no cache]");
42971 return -ENOMEDIUM;
42972 }
42973 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42974 object = cache->ops->alloc_object(cache, cookie);
42975 fscache_stat_d(&fscache_n_cop_alloc_object);
42976 if (IS_ERR(object)) {
42977 - fscache_stat(&fscache_n_object_no_alloc);
42978 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42979 ret = PTR_ERR(object);
42980 goto error;
42981 }
42982
42983 - fscache_stat(&fscache_n_object_alloc);
42984 + fscache_stat_unchecked(&fscache_n_object_alloc);
42985
42986 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42987
42988 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42989 struct fscache_object *object;
42990 struct hlist_node *_p;
42991
42992 - fscache_stat(&fscache_n_updates);
42993 + fscache_stat_unchecked(&fscache_n_updates);
42994
42995 if (!cookie) {
42996 - fscache_stat(&fscache_n_updates_null);
42997 + fscache_stat_unchecked(&fscache_n_updates_null);
42998 _leave(" [no cookie]");
42999 return;
43000 }
43001 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43002 struct fscache_object *object;
43003 unsigned long event;
43004
43005 - fscache_stat(&fscache_n_relinquishes);
43006 + fscache_stat_unchecked(&fscache_n_relinquishes);
43007 if (retire)
43008 - fscache_stat(&fscache_n_relinquishes_retire);
43009 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
43010
43011 if (!cookie) {
43012 - fscache_stat(&fscache_n_relinquishes_null);
43013 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
43014 _leave(" [no cookie]");
43015 return;
43016 }
43017 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43018
43019 /* wait for the cookie to finish being instantiated (or to fail) */
43020 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
43021 - fscache_stat(&fscache_n_relinquishes_waitcrt);
43022 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
43023 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
43024 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
43025 }
43026 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
43027 index f6aad48..88dcf26 100644
43028 --- a/fs/fscache/internal.h
43029 +++ b/fs/fscache/internal.h
43030 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
43031 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
43032 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
43033
43034 -extern atomic_t fscache_n_op_pend;
43035 -extern atomic_t fscache_n_op_run;
43036 -extern atomic_t fscache_n_op_enqueue;
43037 -extern atomic_t fscache_n_op_deferred_release;
43038 -extern atomic_t fscache_n_op_release;
43039 -extern atomic_t fscache_n_op_gc;
43040 -extern atomic_t fscache_n_op_cancelled;
43041 -extern atomic_t fscache_n_op_rejected;
43042 +extern atomic_unchecked_t fscache_n_op_pend;
43043 +extern atomic_unchecked_t fscache_n_op_run;
43044 +extern atomic_unchecked_t fscache_n_op_enqueue;
43045 +extern atomic_unchecked_t fscache_n_op_deferred_release;
43046 +extern atomic_unchecked_t fscache_n_op_release;
43047 +extern atomic_unchecked_t fscache_n_op_gc;
43048 +extern atomic_unchecked_t fscache_n_op_cancelled;
43049 +extern atomic_unchecked_t fscache_n_op_rejected;
43050
43051 -extern atomic_t fscache_n_attr_changed;
43052 -extern atomic_t fscache_n_attr_changed_ok;
43053 -extern atomic_t fscache_n_attr_changed_nobufs;
43054 -extern atomic_t fscache_n_attr_changed_nomem;
43055 -extern atomic_t fscache_n_attr_changed_calls;
43056 +extern atomic_unchecked_t fscache_n_attr_changed;
43057 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
43058 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43059 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43060 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
43061
43062 -extern atomic_t fscache_n_allocs;
43063 -extern atomic_t fscache_n_allocs_ok;
43064 -extern atomic_t fscache_n_allocs_wait;
43065 -extern atomic_t fscache_n_allocs_nobufs;
43066 -extern atomic_t fscache_n_allocs_intr;
43067 -extern atomic_t fscache_n_allocs_object_dead;
43068 -extern atomic_t fscache_n_alloc_ops;
43069 -extern atomic_t fscache_n_alloc_op_waits;
43070 +extern atomic_unchecked_t fscache_n_allocs;
43071 +extern atomic_unchecked_t fscache_n_allocs_ok;
43072 +extern atomic_unchecked_t fscache_n_allocs_wait;
43073 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
43074 +extern atomic_unchecked_t fscache_n_allocs_intr;
43075 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
43076 +extern atomic_unchecked_t fscache_n_alloc_ops;
43077 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
43078
43079 -extern atomic_t fscache_n_retrievals;
43080 -extern atomic_t fscache_n_retrievals_ok;
43081 -extern atomic_t fscache_n_retrievals_wait;
43082 -extern atomic_t fscache_n_retrievals_nodata;
43083 -extern atomic_t fscache_n_retrievals_nobufs;
43084 -extern atomic_t fscache_n_retrievals_intr;
43085 -extern atomic_t fscache_n_retrievals_nomem;
43086 -extern atomic_t fscache_n_retrievals_object_dead;
43087 -extern atomic_t fscache_n_retrieval_ops;
43088 -extern atomic_t fscache_n_retrieval_op_waits;
43089 +extern atomic_unchecked_t fscache_n_retrievals;
43090 +extern atomic_unchecked_t fscache_n_retrievals_ok;
43091 +extern atomic_unchecked_t fscache_n_retrievals_wait;
43092 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
43093 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43094 +extern atomic_unchecked_t fscache_n_retrievals_intr;
43095 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
43096 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43097 +extern atomic_unchecked_t fscache_n_retrieval_ops;
43098 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43099
43100 -extern atomic_t fscache_n_stores;
43101 -extern atomic_t fscache_n_stores_ok;
43102 -extern atomic_t fscache_n_stores_again;
43103 -extern atomic_t fscache_n_stores_nobufs;
43104 -extern atomic_t fscache_n_stores_oom;
43105 -extern atomic_t fscache_n_store_ops;
43106 -extern atomic_t fscache_n_store_calls;
43107 -extern atomic_t fscache_n_store_pages;
43108 -extern atomic_t fscache_n_store_radix_deletes;
43109 -extern atomic_t fscache_n_store_pages_over_limit;
43110 +extern atomic_unchecked_t fscache_n_stores;
43111 +extern atomic_unchecked_t fscache_n_stores_ok;
43112 +extern atomic_unchecked_t fscache_n_stores_again;
43113 +extern atomic_unchecked_t fscache_n_stores_nobufs;
43114 +extern atomic_unchecked_t fscache_n_stores_oom;
43115 +extern atomic_unchecked_t fscache_n_store_ops;
43116 +extern atomic_unchecked_t fscache_n_store_calls;
43117 +extern atomic_unchecked_t fscache_n_store_pages;
43118 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
43119 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43120
43121 -extern atomic_t fscache_n_store_vmscan_not_storing;
43122 -extern atomic_t fscache_n_store_vmscan_gone;
43123 -extern atomic_t fscache_n_store_vmscan_busy;
43124 -extern atomic_t fscache_n_store_vmscan_cancelled;
43125 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43126 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43127 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43128 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43129
43130 -extern atomic_t fscache_n_marks;
43131 -extern atomic_t fscache_n_uncaches;
43132 +extern atomic_unchecked_t fscache_n_marks;
43133 +extern atomic_unchecked_t fscache_n_uncaches;
43134
43135 -extern atomic_t fscache_n_acquires;
43136 -extern atomic_t fscache_n_acquires_null;
43137 -extern atomic_t fscache_n_acquires_no_cache;
43138 -extern atomic_t fscache_n_acquires_ok;
43139 -extern atomic_t fscache_n_acquires_nobufs;
43140 -extern atomic_t fscache_n_acquires_oom;
43141 +extern atomic_unchecked_t fscache_n_acquires;
43142 +extern atomic_unchecked_t fscache_n_acquires_null;
43143 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
43144 +extern atomic_unchecked_t fscache_n_acquires_ok;
43145 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
43146 +extern atomic_unchecked_t fscache_n_acquires_oom;
43147
43148 -extern atomic_t fscache_n_updates;
43149 -extern atomic_t fscache_n_updates_null;
43150 -extern atomic_t fscache_n_updates_run;
43151 +extern atomic_unchecked_t fscache_n_updates;
43152 +extern atomic_unchecked_t fscache_n_updates_null;
43153 +extern atomic_unchecked_t fscache_n_updates_run;
43154
43155 -extern atomic_t fscache_n_relinquishes;
43156 -extern atomic_t fscache_n_relinquishes_null;
43157 -extern atomic_t fscache_n_relinquishes_waitcrt;
43158 -extern atomic_t fscache_n_relinquishes_retire;
43159 +extern atomic_unchecked_t fscache_n_relinquishes;
43160 +extern atomic_unchecked_t fscache_n_relinquishes_null;
43161 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43162 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
43163
43164 -extern atomic_t fscache_n_cookie_index;
43165 -extern atomic_t fscache_n_cookie_data;
43166 -extern atomic_t fscache_n_cookie_special;
43167 +extern atomic_unchecked_t fscache_n_cookie_index;
43168 +extern atomic_unchecked_t fscache_n_cookie_data;
43169 +extern atomic_unchecked_t fscache_n_cookie_special;
43170
43171 -extern atomic_t fscache_n_object_alloc;
43172 -extern atomic_t fscache_n_object_no_alloc;
43173 -extern atomic_t fscache_n_object_lookups;
43174 -extern atomic_t fscache_n_object_lookups_negative;
43175 -extern atomic_t fscache_n_object_lookups_positive;
43176 -extern atomic_t fscache_n_object_lookups_timed_out;
43177 -extern atomic_t fscache_n_object_created;
43178 -extern atomic_t fscache_n_object_avail;
43179 -extern atomic_t fscache_n_object_dead;
43180 +extern atomic_unchecked_t fscache_n_object_alloc;
43181 +extern atomic_unchecked_t fscache_n_object_no_alloc;
43182 +extern atomic_unchecked_t fscache_n_object_lookups;
43183 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
43184 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
43185 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43186 +extern atomic_unchecked_t fscache_n_object_created;
43187 +extern atomic_unchecked_t fscache_n_object_avail;
43188 +extern atomic_unchecked_t fscache_n_object_dead;
43189
43190 -extern atomic_t fscache_n_checkaux_none;
43191 -extern atomic_t fscache_n_checkaux_okay;
43192 -extern atomic_t fscache_n_checkaux_update;
43193 -extern atomic_t fscache_n_checkaux_obsolete;
43194 +extern atomic_unchecked_t fscache_n_checkaux_none;
43195 +extern atomic_unchecked_t fscache_n_checkaux_okay;
43196 +extern atomic_unchecked_t fscache_n_checkaux_update;
43197 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43198
43199 extern atomic_t fscache_n_cop_alloc_object;
43200 extern atomic_t fscache_n_cop_lookup_object;
43201 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43202 atomic_inc(stat);
43203 }
43204
43205 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43206 +{
43207 + atomic_inc_unchecked(stat);
43208 +}
43209 +
43210 static inline void fscache_stat_d(atomic_t *stat)
43211 {
43212 atomic_dec(stat);
43213 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43214
43215 #define __fscache_stat(stat) (NULL)
43216 #define fscache_stat(stat) do {} while (0)
43217 +#define fscache_stat_unchecked(stat) do {} while (0)
43218 #define fscache_stat_d(stat) do {} while (0)
43219 #endif
43220
43221 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43222 index b6b897c..0ffff9c 100644
43223 --- a/fs/fscache/object.c
43224 +++ b/fs/fscache/object.c
43225 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43226 /* update the object metadata on disk */
43227 case FSCACHE_OBJECT_UPDATING:
43228 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43229 - fscache_stat(&fscache_n_updates_run);
43230 + fscache_stat_unchecked(&fscache_n_updates_run);
43231 fscache_stat(&fscache_n_cop_update_object);
43232 object->cache->ops->update_object(object);
43233 fscache_stat_d(&fscache_n_cop_update_object);
43234 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43235 spin_lock(&object->lock);
43236 object->state = FSCACHE_OBJECT_DEAD;
43237 spin_unlock(&object->lock);
43238 - fscache_stat(&fscache_n_object_dead);
43239 + fscache_stat_unchecked(&fscache_n_object_dead);
43240 goto terminal_transit;
43241
43242 /* handle the parent cache of this object being withdrawn from
43243 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43244 spin_lock(&object->lock);
43245 object->state = FSCACHE_OBJECT_DEAD;
43246 spin_unlock(&object->lock);
43247 - fscache_stat(&fscache_n_object_dead);
43248 + fscache_stat_unchecked(&fscache_n_object_dead);
43249 goto terminal_transit;
43250
43251 /* complain about the object being woken up once it is
43252 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43253 parent->cookie->def->name, cookie->def->name,
43254 object->cache->tag->name);
43255
43256 - fscache_stat(&fscache_n_object_lookups);
43257 + fscache_stat_unchecked(&fscache_n_object_lookups);
43258 fscache_stat(&fscache_n_cop_lookup_object);
43259 ret = object->cache->ops->lookup_object(object);
43260 fscache_stat_d(&fscache_n_cop_lookup_object);
43261 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43262 if (ret == -ETIMEDOUT) {
43263 /* probably stuck behind another object, so move this one to
43264 * the back of the queue */
43265 - fscache_stat(&fscache_n_object_lookups_timed_out);
43266 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43267 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43268 }
43269
43270 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43271
43272 spin_lock(&object->lock);
43273 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43274 - fscache_stat(&fscache_n_object_lookups_negative);
43275 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43276
43277 /* transit here to allow write requests to begin stacking up
43278 * and read requests to begin returning ENODATA */
43279 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43280 * result, in which case there may be data available */
43281 spin_lock(&object->lock);
43282 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43283 - fscache_stat(&fscache_n_object_lookups_positive);
43284 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43285
43286 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43287
43288 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43289 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43290 } else {
43291 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43292 - fscache_stat(&fscache_n_object_created);
43293 + fscache_stat_unchecked(&fscache_n_object_created);
43294
43295 object->state = FSCACHE_OBJECT_AVAILABLE;
43296 spin_unlock(&object->lock);
43297 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43298 fscache_enqueue_dependents(object);
43299
43300 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43301 - fscache_stat(&fscache_n_object_avail);
43302 + fscache_stat_unchecked(&fscache_n_object_avail);
43303
43304 _leave("");
43305 }
43306 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43307 enum fscache_checkaux result;
43308
43309 if (!object->cookie->def->check_aux) {
43310 - fscache_stat(&fscache_n_checkaux_none);
43311 + fscache_stat_unchecked(&fscache_n_checkaux_none);
43312 return FSCACHE_CHECKAUX_OKAY;
43313 }
43314
43315 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43316 switch (result) {
43317 /* entry okay as is */
43318 case FSCACHE_CHECKAUX_OKAY:
43319 - fscache_stat(&fscache_n_checkaux_okay);
43320 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
43321 break;
43322
43323 /* entry requires update */
43324 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43325 - fscache_stat(&fscache_n_checkaux_update);
43326 + fscache_stat_unchecked(&fscache_n_checkaux_update);
43327 break;
43328
43329 /* entry requires deletion */
43330 case FSCACHE_CHECKAUX_OBSOLETE:
43331 - fscache_stat(&fscache_n_checkaux_obsolete);
43332 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43333 break;
43334
43335 default:
43336 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43337 index 30afdfa..2256596 100644
43338 --- a/fs/fscache/operation.c
43339 +++ b/fs/fscache/operation.c
43340 @@ -17,7 +17,7 @@
43341 #include <linux/slab.h>
43342 #include "internal.h"
43343
43344 -atomic_t fscache_op_debug_id;
43345 +atomic_unchecked_t fscache_op_debug_id;
43346 EXPORT_SYMBOL(fscache_op_debug_id);
43347
43348 /**
43349 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43350 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43351 ASSERTCMP(atomic_read(&op->usage), >, 0);
43352
43353 - fscache_stat(&fscache_n_op_enqueue);
43354 + fscache_stat_unchecked(&fscache_n_op_enqueue);
43355 switch (op->flags & FSCACHE_OP_TYPE) {
43356 case FSCACHE_OP_ASYNC:
43357 _debug("queue async");
43358 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43359 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43360 if (op->processor)
43361 fscache_enqueue_operation(op);
43362 - fscache_stat(&fscache_n_op_run);
43363 + fscache_stat_unchecked(&fscache_n_op_run);
43364 }
43365
43366 /*
43367 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43368 if (object->n_ops > 1) {
43369 atomic_inc(&op->usage);
43370 list_add_tail(&op->pend_link, &object->pending_ops);
43371 - fscache_stat(&fscache_n_op_pend);
43372 + fscache_stat_unchecked(&fscache_n_op_pend);
43373 } else if (!list_empty(&object->pending_ops)) {
43374 atomic_inc(&op->usage);
43375 list_add_tail(&op->pend_link, &object->pending_ops);
43376 - fscache_stat(&fscache_n_op_pend);
43377 + fscache_stat_unchecked(&fscache_n_op_pend);
43378 fscache_start_operations(object);
43379 } else {
43380 ASSERTCMP(object->n_in_progress, ==, 0);
43381 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43382 object->n_exclusive++; /* reads and writes must wait */
43383 atomic_inc(&op->usage);
43384 list_add_tail(&op->pend_link, &object->pending_ops);
43385 - fscache_stat(&fscache_n_op_pend);
43386 + fscache_stat_unchecked(&fscache_n_op_pend);
43387 ret = 0;
43388 } else {
43389 /* not allowed to submit ops in any other state */
43390 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43391 if (object->n_exclusive > 0) {
43392 atomic_inc(&op->usage);
43393 list_add_tail(&op->pend_link, &object->pending_ops);
43394 - fscache_stat(&fscache_n_op_pend);
43395 + fscache_stat_unchecked(&fscache_n_op_pend);
43396 } else if (!list_empty(&object->pending_ops)) {
43397 atomic_inc(&op->usage);
43398 list_add_tail(&op->pend_link, &object->pending_ops);
43399 - fscache_stat(&fscache_n_op_pend);
43400 + fscache_stat_unchecked(&fscache_n_op_pend);
43401 fscache_start_operations(object);
43402 } else {
43403 ASSERTCMP(object->n_exclusive, ==, 0);
43404 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43405 object->n_ops++;
43406 atomic_inc(&op->usage);
43407 list_add_tail(&op->pend_link, &object->pending_ops);
43408 - fscache_stat(&fscache_n_op_pend);
43409 + fscache_stat_unchecked(&fscache_n_op_pend);
43410 ret = 0;
43411 } else if (object->state == FSCACHE_OBJECT_DYING ||
43412 object->state == FSCACHE_OBJECT_LC_DYING ||
43413 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43414 - fscache_stat(&fscache_n_op_rejected);
43415 + fscache_stat_unchecked(&fscache_n_op_rejected);
43416 ret = -ENOBUFS;
43417 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43418 fscache_report_unexpected_submission(object, op, ostate);
43419 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43420
43421 ret = -EBUSY;
43422 if (!list_empty(&op->pend_link)) {
43423 - fscache_stat(&fscache_n_op_cancelled);
43424 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43425 list_del_init(&op->pend_link);
43426 object->n_ops--;
43427 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43428 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43429 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43430 BUG();
43431
43432 - fscache_stat(&fscache_n_op_release);
43433 + fscache_stat_unchecked(&fscache_n_op_release);
43434
43435 if (op->release) {
43436 op->release(op);
43437 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43438 * lock, and defer it otherwise */
43439 if (!spin_trylock(&object->lock)) {
43440 _debug("defer put");
43441 - fscache_stat(&fscache_n_op_deferred_release);
43442 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43443
43444 cache = object->cache;
43445 spin_lock(&cache->op_gc_list_lock);
43446 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43447
43448 _debug("GC DEFERRED REL OBJ%x OP%x",
43449 object->debug_id, op->debug_id);
43450 - fscache_stat(&fscache_n_op_gc);
43451 + fscache_stat_unchecked(&fscache_n_op_gc);
43452
43453 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43454
43455 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43456 index 3f7a59b..cf196cc 100644
43457 --- a/fs/fscache/page.c
43458 +++ b/fs/fscache/page.c
43459 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43460 val = radix_tree_lookup(&cookie->stores, page->index);
43461 if (!val) {
43462 rcu_read_unlock();
43463 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43464 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43465 __fscache_uncache_page(cookie, page);
43466 return true;
43467 }
43468 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43469 spin_unlock(&cookie->stores_lock);
43470
43471 if (xpage) {
43472 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43473 - fscache_stat(&fscache_n_store_radix_deletes);
43474 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43475 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43476 ASSERTCMP(xpage, ==, page);
43477 } else {
43478 - fscache_stat(&fscache_n_store_vmscan_gone);
43479 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43480 }
43481
43482 wake_up_bit(&cookie->flags, 0);
43483 @@ -107,7 +107,7 @@ page_busy:
43484 /* we might want to wait here, but that could deadlock the allocator as
43485 * the work threads writing to the cache may all end up sleeping
43486 * on memory allocation */
43487 - fscache_stat(&fscache_n_store_vmscan_busy);
43488 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43489 return false;
43490 }
43491 EXPORT_SYMBOL(__fscache_maybe_release_page);
43492 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43493 FSCACHE_COOKIE_STORING_TAG);
43494 if (!radix_tree_tag_get(&cookie->stores, page->index,
43495 FSCACHE_COOKIE_PENDING_TAG)) {
43496 - fscache_stat(&fscache_n_store_radix_deletes);
43497 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43498 xpage = radix_tree_delete(&cookie->stores, page->index);
43499 }
43500 spin_unlock(&cookie->stores_lock);
43501 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43502
43503 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43504
43505 - fscache_stat(&fscache_n_attr_changed_calls);
43506 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43507
43508 if (fscache_object_is_active(object)) {
43509 fscache_stat(&fscache_n_cop_attr_changed);
43510 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43511
43512 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43513
43514 - fscache_stat(&fscache_n_attr_changed);
43515 + fscache_stat_unchecked(&fscache_n_attr_changed);
43516
43517 op = kzalloc(sizeof(*op), GFP_KERNEL);
43518 if (!op) {
43519 - fscache_stat(&fscache_n_attr_changed_nomem);
43520 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43521 _leave(" = -ENOMEM");
43522 return -ENOMEM;
43523 }
43524 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43525 if (fscache_submit_exclusive_op(object, op) < 0)
43526 goto nobufs;
43527 spin_unlock(&cookie->lock);
43528 - fscache_stat(&fscache_n_attr_changed_ok);
43529 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43530 fscache_put_operation(op);
43531 _leave(" = 0");
43532 return 0;
43533 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43534 nobufs:
43535 spin_unlock(&cookie->lock);
43536 kfree(op);
43537 - fscache_stat(&fscache_n_attr_changed_nobufs);
43538 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43539 _leave(" = %d", -ENOBUFS);
43540 return -ENOBUFS;
43541 }
43542 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43543 /* allocate a retrieval operation and attempt to submit it */
43544 op = kzalloc(sizeof(*op), GFP_NOIO);
43545 if (!op) {
43546 - fscache_stat(&fscache_n_retrievals_nomem);
43547 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43548 return NULL;
43549 }
43550
43551 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43552 return 0;
43553 }
43554
43555 - fscache_stat(&fscache_n_retrievals_wait);
43556 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43557
43558 jif = jiffies;
43559 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43560 fscache_wait_bit_interruptible,
43561 TASK_INTERRUPTIBLE) != 0) {
43562 - fscache_stat(&fscache_n_retrievals_intr);
43563 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43564 _leave(" = -ERESTARTSYS");
43565 return -ERESTARTSYS;
43566 }
43567 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43568 */
43569 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43570 struct fscache_retrieval *op,
43571 - atomic_t *stat_op_waits,
43572 - atomic_t *stat_object_dead)
43573 + atomic_unchecked_t *stat_op_waits,
43574 + atomic_unchecked_t *stat_object_dead)
43575 {
43576 int ret;
43577
43578 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43579 goto check_if_dead;
43580
43581 _debug(">>> WT");
43582 - fscache_stat(stat_op_waits);
43583 + fscache_stat_unchecked(stat_op_waits);
43584 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43585 fscache_wait_bit_interruptible,
43586 TASK_INTERRUPTIBLE) < 0) {
43587 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43588
43589 check_if_dead:
43590 if (unlikely(fscache_object_is_dead(object))) {
43591 - fscache_stat(stat_object_dead);
43592 + fscache_stat_unchecked(stat_object_dead);
43593 return -ENOBUFS;
43594 }
43595 return 0;
43596 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43597
43598 _enter("%p,%p,,,", cookie, page);
43599
43600 - fscache_stat(&fscache_n_retrievals);
43601 + fscache_stat_unchecked(&fscache_n_retrievals);
43602
43603 if (hlist_empty(&cookie->backing_objects))
43604 goto nobufs;
43605 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43606 goto nobufs_unlock;
43607 spin_unlock(&cookie->lock);
43608
43609 - fscache_stat(&fscache_n_retrieval_ops);
43610 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43611
43612 /* pin the netfs read context in case we need to do the actual netfs
43613 * read because we've encountered a cache read failure */
43614 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43615
43616 error:
43617 if (ret == -ENOMEM)
43618 - fscache_stat(&fscache_n_retrievals_nomem);
43619 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43620 else if (ret == -ERESTARTSYS)
43621 - fscache_stat(&fscache_n_retrievals_intr);
43622 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43623 else if (ret == -ENODATA)
43624 - fscache_stat(&fscache_n_retrievals_nodata);
43625 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43626 else if (ret < 0)
43627 - fscache_stat(&fscache_n_retrievals_nobufs);
43628 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43629 else
43630 - fscache_stat(&fscache_n_retrievals_ok);
43631 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43632
43633 fscache_put_retrieval(op);
43634 _leave(" = %d", ret);
43635 @@ -429,7 +429,7 @@ nobufs_unlock:
43636 spin_unlock(&cookie->lock);
43637 kfree(op);
43638 nobufs:
43639 - fscache_stat(&fscache_n_retrievals_nobufs);
43640 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43641 _leave(" = -ENOBUFS");
43642 return -ENOBUFS;
43643 }
43644 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43645
43646 _enter("%p,,%d,,,", cookie, *nr_pages);
43647
43648 - fscache_stat(&fscache_n_retrievals);
43649 + fscache_stat_unchecked(&fscache_n_retrievals);
43650
43651 if (hlist_empty(&cookie->backing_objects))
43652 goto nobufs;
43653 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43654 goto nobufs_unlock;
43655 spin_unlock(&cookie->lock);
43656
43657 - fscache_stat(&fscache_n_retrieval_ops);
43658 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43659
43660 /* pin the netfs read context in case we need to do the actual netfs
43661 * read because we've encountered a cache read failure */
43662 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43663
43664 error:
43665 if (ret == -ENOMEM)
43666 - fscache_stat(&fscache_n_retrievals_nomem);
43667 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43668 else if (ret == -ERESTARTSYS)
43669 - fscache_stat(&fscache_n_retrievals_intr);
43670 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43671 else if (ret == -ENODATA)
43672 - fscache_stat(&fscache_n_retrievals_nodata);
43673 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43674 else if (ret < 0)
43675 - fscache_stat(&fscache_n_retrievals_nobufs);
43676 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43677 else
43678 - fscache_stat(&fscache_n_retrievals_ok);
43679 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43680
43681 fscache_put_retrieval(op);
43682 _leave(" = %d", ret);
43683 @@ -545,7 +545,7 @@ nobufs_unlock:
43684 spin_unlock(&cookie->lock);
43685 kfree(op);
43686 nobufs:
43687 - fscache_stat(&fscache_n_retrievals_nobufs);
43688 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43689 _leave(" = -ENOBUFS");
43690 return -ENOBUFS;
43691 }
43692 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43693
43694 _enter("%p,%p,,,", cookie, page);
43695
43696 - fscache_stat(&fscache_n_allocs);
43697 + fscache_stat_unchecked(&fscache_n_allocs);
43698
43699 if (hlist_empty(&cookie->backing_objects))
43700 goto nobufs;
43701 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43702 goto nobufs_unlock;
43703 spin_unlock(&cookie->lock);
43704
43705 - fscache_stat(&fscache_n_alloc_ops);
43706 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43707
43708 ret = fscache_wait_for_retrieval_activation(
43709 object, op,
43710 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43711
43712 error:
43713 if (ret == -ERESTARTSYS)
43714 - fscache_stat(&fscache_n_allocs_intr);
43715 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43716 else if (ret < 0)
43717 - fscache_stat(&fscache_n_allocs_nobufs);
43718 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43719 else
43720 - fscache_stat(&fscache_n_allocs_ok);
43721 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43722
43723 fscache_put_retrieval(op);
43724 _leave(" = %d", ret);
43725 @@ -625,7 +625,7 @@ nobufs_unlock:
43726 spin_unlock(&cookie->lock);
43727 kfree(op);
43728 nobufs:
43729 - fscache_stat(&fscache_n_allocs_nobufs);
43730 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43731 _leave(" = -ENOBUFS");
43732 return -ENOBUFS;
43733 }
43734 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43735
43736 spin_lock(&cookie->stores_lock);
43737
43738 - fscache_stat(&fscache_n_store_calls);
43739 + fscache_stat_unchecked(&fscache_n_store_calls);
43740
43741 /* find a page to store */
43742 page = NULL;
43743 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43744 page = results[0];
43745 _debug("gang %d [%lx]", n, page->index);
43746 if (page->index > op->store_limit) {
43747 - fscache_stat(&fscache_n_store_pages_over_limit);
43748 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43749 goto superseded;
43750 }
43751
43752 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43753 spin_unlock(&cookie->stores_lock);
43754 spin_unlock(&object->lock);
43755
43756 - fscache_stat(&fscache_n_store_pages);
43757 + fscache_stat_unchecked(&fscache_n_store_pages);
43758 fscache_stat(&fscache_n_cop_write_page);
43759 ret = object->cache->ops->write_page(op, page);
43760 fscache_stat_d(&fscache_n_cop_write_page);
43761 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43762 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43763 ASSERT(PageFsCache(page));
43764
43765 - fscache_stat(&fscache_n_stores);
43766 + fscache_stat_unchecked(&fscache_n_stores);
43767
43768 op = kzalloc(sizeof(*op), GFP_NOIO);
43769 if (!op)
43770 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43771 spin_unlock(&cookie->stores_lock);
43772 spin_unlock(&object->lock);
43773
43774 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43775 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43776 op->store_limit = object->store_limit;
43777
43778 if (fscache_submit_op(object, &op->op) < 0)
43779 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43780
43781 spin_unlock(&cookie->lock);
43782 radix_tree_preload_end();
43783 - fscache_stat(&fscache_n_store_ops);
43784 - fscache_stat(&fscache_n_stores_ok);
43785 + fscache_stat_unchecked(&fscache_n_store_ops);
43786 + fscache_stat_unchecked(&fscache_n_stores_ok);
43787
43788 /* the work queue now carries its own ref on the object */
43789 fscache_put_operation(&op->op);
43790 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43791 return 0;
43792
43793 already_queued:
43794 - fscache_stat(&fscache_n_stores_again);
43795 + fscache_stat_unchecked(&fscache_n_stores_again);
43796 already_pending:
43797 spin_unlock(&cookie->stores_lock);
43798 spin_unlock(&object->lock);
43799 spin_unlock(&cookie->lock);
43800 radix_tree_preload_end();
43801 kfree(op);
43802 - fscache_stat(&fscache_n_stores_ok);
43803 + fscache_stat_unchecked(&fscache_n_stores_ok);
43804 _leave(" = 0");
43805 return 0;
43806
43807 @@ -851,14 +851,14 @@ nobufs:
43808 spin_unlock(&cookie->lock);
43809 radix_tree_preload_end();
43810 kfree(op);
43811 - fscache_stat(&fscache_n_stores_nobufs);
43812 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43813 _leave(" = -ENOBUFS");
43814 return -ENOBUFS;
43815
43816 nomem_free:
43817 kfree(op);
43818 nomem:
43819 - fscache_stat(&fscache_n_stores_oom);
43820 + fscache_stat_unchecked(&fscache_n_stores_oom);
43821 _leave(" = -ENOMEM");
43822 return -ENOMEM;
43823 }
43824 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43825 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43826 ASSERTCMP(page, !=, NULL);
43827
43828 - fscache_stat(&fscache_n_uncaches);
43829 + fscache_stat_unchecked(&fscache_n_uncaches);
43830
43831 /* cache withdrawal may beat us to it */
43832 if (!PageFsCache(page))
43833 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43834 unsigned long loop;
43835
43836 #ifdef CONFIG_FSCACHE_STATS
43837 - atomic_add(pagevec->nr, &fscache_n_marks);
43838 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43839 #endif
43840
43841 for (loop = 0; loop < pagevec->nr; loop++) {
43842 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43843 index 4765190..2a067f2 100644
43844 --- a/fs/fscache/stats.c
43845 +++ b/fs/fscache/stats.c
43846 @@ -18,95 +18,95 @@
43847 /*
43848 * operation counters
43849 */
43850 -atomic_t fscache_n_op_pend;
43851 -atomic_t fscache_n_op_run;
43852 -atomic_t fscache_n_op_enqueue;
43853 -atomic_t fscache_n_op_requeue;
43854 -atomic_t fscache_n_op_deferred_release;
43855 -atomic_t fscache_n_op_release;
43856 -atomic_t fscache_n_op_gc;
43857 -atomic_t fscache_n_op_cancelled;
43858 -atomic_t fscache_n_op_rejected;
43859 +atomic_unchecked_t fscache_n_op_pend;
43860 +atomic_unchecked_t fscache_n_op_run;
43861 +atomic_unchecked_t fscache_n_op_enqueue;
43862 +atomic_unchecked_t fscache_n_op_requeue;
43863 +atomic_unchecked_t fscache_n_op_deferred_release;
43864 +atomic_unchecked_t fscache_n_op_release;
43865 +atomic_unchecked_t fscache_n_op_gc;
43866 +atomic_unchecked_t fscache_n_op_cancelled;
43867 +atomic_unchecked_t fscache_n_op_rejected;
43868
43869 -atomic_t fscache_n_attr_changed;
43870 -atomic_t fscache_n_attr_changed_ok;
43871 -atomic_t fscache_n_attr_changed_nobufs;
43872 -atomic_t fscache_n_attr_changed_nomem;
43873 -atomic_t fscache_n_attr_changed_calls;
43874 +atomic_unchecked_t fscache_n_attr_changed;
43875 +atomic_unchecked_t fscache_n_attr_changed_ok;
43876 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43877 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43878 +atomic_unchecked_t fscache_n_attr_changed_calls;
43879
43880 -atomic_t fscache_n_allocs;
43881 -atomic_t fscache_n_allocs_ok;
43882 -atomic_t fscache_n_allocs_wait;
43883 -atomic_t fscache_n_allocs_nobufs;
43884 -atomic_t fscache_n_allocs_intr;
43885 -atomic_t fscache_n_allocs_object_dead;
43886 -atomic_t fscache_n_alloc_ops;
43887 -atomic_t fscache_n_alloc_op_waits;
43888 +atomic_unchecked_t fscache_n_allocs;
43889 +atomic_unchecked_t fscache_n_allocs_ok;
43890 +atomic_unchecked_t fscache_n_allocs_wait;
43891 +atomic_unchecked_t fscache_n_allocs_nobufs;
43892 +atomic_unchecked_t fscache_n_allocs_intr;
43893 +atomic_unchecked_t fscache_n_allocs_object_dead;
43894 +atomic_unchecked_t fscache_n_alloc_ops;
43895 +atomic_unchecked_t fscache_n_alloc_op_waits;
43896
43897 -atomic_t fscache_n_retrievals;
43898 -atomic_t fscache_n_retrievals_ok;
43899 -atomic_t fscache_n_retrievals_wait;
43900 -atomic_t fscache_n_retrievals_nodata;
43901 -atomic_t fscache_n_retrievals_nobufs;
43902 -atomic_t fscache_n_retrievals_intr;
43903 -atomic_t fscache_n_retrievals_nomem;
43904 -atomic_t fscache_n_retrievals_object_dead;
43905 -atomic_t fscache_n_retrieval_ops;
43906 -atomic_t fscache_n_retrieval_op_waits;
43907 +atomic_unchecked_t fscache_n_retrievals;
43908 +atomic_unchecked_t fscache_n_retrievals_ok;
43909 +atomic_unchecked_t fscache_n_retrievals_wait;
43910 +atomic_unchecked_t fscache_n_retrievals_nodata;
43911 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43912 +atomic_unchecked_t fscache_n_retrievals_intr;
43913 +atomic_unchecked_t fscache_n_retrievals_nomem;
43914 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43915 +atomic_unchecked_t fscache_n_retrieval_ops;
43916 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43917
43918 -atomic_t fscache_n_stores;
43919 -atomic_t fscache_n_stores_ok;
43920 -atomic_t fscache_n_stores_again;
43921 -atomic_t fscache_n_stores_nobufs;
43922 -atomic_t fscache_n_stores_oom;
43923 -atomic_t fscache_n_store_ops;
43924 -atomic_t fscache_n_store_calls;
43925 -atomic_t fscache_n_store_pages;
43926 -atomic_t fscache_n_store_radix_deletes;
43927 -atomic_t fscache_n_store_pages_over_limit;
43928 +atomic_unchecked_t fscache_n_stores;
43929 +atomic_unchecked_t fscache_n_stores_ok;
43930 +atomic_unchecked_t fscache_n_stores_again;
43931 +atomic_unchecked_t fscache_n_stores_nobufs;
43932 +atomic_unchecked_t fscache_n_stores_oom;
43933 +atomic_unchecked_t fscache_n_store_ops;
43934 +atomic_unchecked_t fscache_n_store_calls;
43935 +atomic_unchecked_t fscache_n_store_pages;
43936 +atomic_unchecked_t fscache_n_store_radix_deletes;
43937 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43938
43939 -atomic_t fscache_n_store_vmscan_not_storing;
43940 -atomic_t fscache_n_store_vmscan_gone;
43941 -atomic_t fscache_n_store_vmscan_busy;
43942 -atomic_t fscache_n_store_vmscan_cancelled;
43943 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43944 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43945 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43946 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43947
43948 -atomic_t fscache_n_marks;
43949 -atomic_t fscache_n_uncaches;
43950 +atomic_unchecked_t fscache_n_marks;
43951 +atomic_unchecked_t fscache_n_uncaches;
43952
43953 -atomic_t fscache_n_acquires;
43954 -atomic_t fscache_n_acquires_null;
43955 -atomic_t fscache_n_acquires_no_cache;
43956 -atomic_t fscache_n_acquires_ok;
43957 -atomic_t fscache_n_acquires_nobufs;
43958 -atomic_t fscache_n_acquires_oom;
43959 +atomic_unchecked_t fscache_n_acquires;
43960 +atomic_unchecked_t fscache_n_acquires_null;
43961 +atomic_unchecked_t fscache_n_acquires_no_cache;
43962 +atomic_unchecked_t fscache_n_acquires_ok;
43963 +atomic_unchecked_t fscache_n_acquires_nobufs;
43964 +atomic_unchecked_t fscache_n_acquires_oom;
43965
43966 -atomic_t fscache_n_updates;
43967 -atomic_t fscache_n_updates_null;
43968 -atomic_t fscache_n_updates_run;
43969 +atomic_unchecked_t fscache_n_updates;
43970 +atomic_unchecked_t fscache_n_updates_null;
43971 +atomic_unchecked_t fscache_n_updates_run;
43972
43973 -atomic_t fscache_n_relinquishes;
43974 -atomic_t fscache_n_relinquishes_null;
43975 -atomic_t fscache_n_relinquishes_waitcrt;
43976 -atomic_t fscache_n_relinquishes_retire;
43977 +atomic_unchecked_t fscache_n_relinquishes;
43978 +atomic_unchecked_t fscache_n_relinquishes_null;
43979 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43980 +atomic_unchecked_t fscache_n_relinquishes_retire;
43981
43982 -atomic_t fscache_n_cookie_index;
43983 -atomic_t fscache_n_cookie_data;
43984 -atomic_t fscache_n_cookie_special;
43985 +atomic_unchecked_t fscache_n_cookie_index;
43986 +atomic_unchecked_t fscache_n_cookie_data;
43987 +atomic_unchecked_t fscache_n_cookie_special;
43988
43989 -atomic_t fscache_n_object_alloc;
43990 -atomic_t fscache_n_object_no_alloc;
43991 -atomic_t fscache_n_object_lookups;
43992 -atomic_t fscache_n_object_lookups_negative;
43993 -atomic_t fscache_n_object_lookups_positive;
43994 -atomic_t fscache_n_object_lookups_timed_out;
43995 -atomic_t fscache_n_object_created;
43996 -atomic_t fscache_n_object_avail;
43997 -atomic_t fscache_n_object_dead;
43998 +atomic_unchecked_t fscache_n_object_alloc;
43999 +atomic_unchecked_t fscache_n_object_no_alloc;
44000 +atomic_unchecked_t fscache_n_object_lookups;
44001 +atomic_unchecked_t fscache_n_object_lookups_negative;
44002 +atomic_unchecked_t fscache_n_object_lookups_positive;
44003 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
44004 +atomic_unchecked_t fscache_n_object_created;
44005 +atomic_unchecked_t fscache_n_object_avail;
44006 +atomic_unchecked_t fscache_n_object_dead;
44007
44008 -atomic_t fscache_n_checkaux_none;
44009 -atomic_t fscache_n_checkaux_okay;
44010 -atomic_t fscache_n_checkaux_update;
44011 -atomic_t fscache_n_checkaux_obsolete;
44012 +atomic_unchecked_t fscache_n_checkaux_none;
44013 +atomic_unchecked_t fscache_n_checkaux_okay;
44014 +atomic_unchecked_t fscache_n_checkaux_update;
44015 +atomic_unchecked_t fscache_n_checkaux_obsolete;
44016
44017 atomic_t fscache_n_cop_alloc_object;
44018 atomic_t fscache_n_cop_lookup_object;
44019 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
44020 seq_puts(m, "FS-Cache statistics\n");
44021
44022 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
44023 - atomic_read(&fscache_n_cookie_index),
44024 - atomic_read(&fscache_n_cookie_data),
44025 - atomic_read(&fscache_n_cookie_special));
44026 + atomic_read_unchecked(&fscache_n_cookie_index),
44027 + atomic_read_unchecked(&fscache_n_cookie_data),
44028 + atomic_read_unchecked(&fscache_n_cookie_special));
44029
44030 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
44031 - atomic_read(&fscache_n_object_alloc),
44032 - atomic_read(&fscache_n_object_no_alloc),
44033 - atomic_read(&fscache_n_object_avail),
44034 - atomic_read(&fscache_n_object_dead));
44035 + atomic_read_unchecked(&fscache_n_object_alloc),
44036 + atomic_read_unchecked(&fscache_n_object_no_alloc),
44037 + atomic_read_unchecked(&fscache_n_object_avail),
44038 + atomic_read_unchecked(&fscache_n_object_dead));
44039 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44040 - atomic_read(&fscache_n_checkaux_none),
44041 - atomic_read(&fscache_n_checkaux_okay),
44042 - atomic_read(&fscache_n_checkaux_update),
44043 - atomic_read(&fscache_n_checkaux_obsolete));
44044 + atomic_read_unchecked(&fscache_n_checkaux_none),
44045 + atomic_read_unchecked(&fscache_n_checkaux_okay),
44046 + atomic_read_unchecked(&fscache_n_checkaux_update),
44047 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44048
44049 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44050 - atomic_read(&fscache_n_marks),
44051 - atomic_read(&fscache_n_uncaches));
44052 + atomic_read_unchecked(&fscache_n_marks),
44053 + atomic_read_unchecked(&fscache_n_uncaches));
44054
44055 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44056 " oom=%u\n",
44057 - atomic_read(&fscache_n_acquires),
44058 - atomic_read(&fscache_n_acquires_null),
44059 - atomic_read(&fscache_n_acquires_no_cache),
44060 - atomic_read(&fscache_n_acquires_ok),
44061 - atomic_read(&fscache_n_acquires_nobufs),
44062 - atomic_read(&fscache_n_acquires_oom));
44063 + atomic_read_unchecked(&fscache_n_acquires),
44064 + atomic_read_unchecked(&fscache_n_acquires_null),
44065 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
44066 + atomic_read_unchecked(&fscache_n_acquires_ok),
44067 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
44068 + atomic_read_unchecked(&fscache_n_acquires_oom));
44069
44070 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44071 - atomic_read(&fscache_n_object_lookups),
44072 - atomic_read(&fscache_n_object_lookups_negative),
44073 - atomic_read(&fscache_n_object_lookups_positive),
44074 - atomic_read(&fscache_n_object_created),
44075 - atomic_read(&fscache_n_object_lookups_timed_out));
44076 + atomic_read_unchecked(&fscache_n_object_lookups),
44077 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
44078 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
44079 + atomic_read_unchecked(&fscache_n_object_created),
44080 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44081
44082 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44083 - atomic_read(&fscache_n_updates),
44084 - atomic_read(&fscache_n_updates_null),
44085 - atomic_read(&fscache_n_updates_run));
44086 + atomic_read_unchecked(&fscache_n_updates),
44087 + atomic_read_unchecked(&fscache_n_updates_null),
44088 + atomic_read_unchecked(&fscache_n_updates_run));
44089
44090 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44091 - atomic_read(&fscache_n_relinquishes),
44092 - atomic_read(&fscache_n_relinquishes_null),
44093 - atomic_read(&fscache_n_relinquishes_waitcrt),
44094 - atomic_read(&fscache_n_relinquishes_retire));
44095 + atomic_read_unchecked(&fscache_n_relinquishes),
44096 + atomic_read_unchecked(&fscache_n_relinquishes_null),
44097 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44098 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
44099
44100 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44101 - atomic_read(&fscache_n_attr_changed),
44102 - atomic_read(&fscache_n_attr_changed_ok),
44103 - atomic_read(&fscache_n_attr_changed_nobufs),
44104 - atomic_read(&fscache_n_attr_changed_nomem),
44105 - atomic_read(&fscache_n_attr_changed_calls));
44106 + atomic_read_unchecked(&fscache_n_attr_changed),
44107 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
44108 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44109 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44110 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
44111
44112 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44113 - atomic_read(&fscache_n_allocs),
44114 - atomic_read(&fscache_n_allocs_ok),
44115 - atomic_read(&fscache_n_allocs_wait),
44116 - atomic_read(&fscache_n_allocs_nobufs),
44117 - atomic_read(&fscache_n_allocs_intr));
44118 + atomic_read_unchecked(&fscache_n_allocs),
44119 + atomic_read_unchecked(&fscache_n_allocs_ok),
44120 + atomic_read_unchecked(&fscache_n_allocs_wait),
44121 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
44122 + atomic_read_unchecked(&fscache_n_allocs_intr));
44123 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44124 - atomic_read(&fscache_n_alloc_ops),
44125 - atomic_read(&fscache_n_alloc_op_waits),
44126 - atomic_read(&fscache_n_allocs_object_dead));
44127 + atomic_read_unchecked(&fscache_n_alloc_ops),
44128 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
44129 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
44130
44131 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44132 " int=%u oom=%u\n",
44133 - atomic_read(&fscache_n_retrievals),
44134 - atomic_read(&fscache_n_retrievals_ok),
44135 - atomic_read(&fscache_n_retrievals_wait),
44136 - atomic_read(&fscache_n_retrievals_nodata),
44137 - atomic_read(&fscache_n_retrievals_nobufs),
44138 - atomic_read(&fscache_n_retrievals_intr),
44139 - atomic_read(&fscache_n_retrievals_nomem));
44140 + atomic_read_unchecked(&fscache_n_retrievals),
44141 + atomic_read_unchecked(&fscache_n_retrievals_ok),
44142 + atomic_read_unchecked(&fscache_n_retrievals_wait),
44143 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
44144 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44145 + atomic_read_unchecked(&fscache_n_retrievals_intr),
44146 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
44147 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44148 - atomic_read(&fscache_n_retrieval_ops),
44149 - atomic_read(&fscache_n_retrieval_op_waits),
44150 - atomic_read(&fscache_n_retrievals_object_dead));
44151 + atomic_read_unchecked(&fscache_n_retrieval_ops),
44152 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44153 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44154
44155 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44156 - atomic_read(&fscache_n_stores),
44157 - atomic_read(&fscache_n_stores_ok),
44158 - atomic_read(&fscache_n_stores_again),
44159 - atomic_read(&fscache_n_stores_nobufs),
44160 - atomic_read(&fscache_n_stores_oom));
44161 + atomic_read_unchecked(&fscache_n_stores),
44162 + atomic_read_unchecked(&fscache_n_stores_ok),
44163 + atomic_read_unchecked(&fscache_n_stores_again),
44164 + atomic_read_unchecked(&fscache_n_stores_nobufs),
44165 + atomic_read_unchecked(&fscache_n_stores_oom));
44166 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44167 - atomic_read(&fscache_n_store_ops),
44168 - atomic_read(&fscache_n_store_calls),
44169 - atomic_read(&fscache_n_store_pages),
44170 - atomic_read(&fscache_n_store_radix_deletes),
44171 - atomic_read(&fscache_n_store_pages_over_limit));
44172 + atomic_read_unchecked(&fscache_n_store_ops),
44173 + atomic_read_unchecked(&fscache_n_store_calls),
44174 + atomic_read_unchecked(&fscache_n_store_pages),
44175 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
44176 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44177
44178 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44179 - atomic_read(&fscache_n_store_vmscan_not_storing),
44180 - atomic_read(&fscache_n_store_vmscan_gone),
44181 - atomic_read(&fscache_n_store_vmscan_busy),
44182 - atomic_read(&fscache_n_store_vmscan_cancelled));
44183 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44184 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44185 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44186 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44187
44188 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44189 - atomic_read(&fscache_n_op_pend),
44190 - atomic_read(&fscache_n_op_run),
44191 - atomic_read(&fscache_n_op_enqueue),
44192 - atomic_read(&fscache_n_op_cancelled),
44193 - atomic_read(&fscache_n_op_rejected));
44194 + atomic_read_unchecked(&fscache_n_op_pend),
44195 + atomic_read_unchecked(&fscache_n_op_run),
44196 + atomic_read_unchecked(&fscache_n_op_enqueue),
44197 + atomic_read_unchecked(&fscache_n_op_cancelled),
44198 + atomic_read_unchecked(&fscache_n_op_rejected));
44199 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44200 - atomic_read(&fscache_n_op_deferred_release),
44201 - atomic_read(&fscache_n_op_release),
44202 - atomic_read(&fscache_n_op_gc));
44203 + atomic_read_unchecked(&fscache_n_op_deferred_release),
44204 + atomic_read_unchecked(&fscache_n_op_release),
44205 + atomic_read_unchecked(&fscache_n_op_gc));
44206
44207 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44208 atomic_read(&fscache_n_cop_alloc_object),
44209 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44210 index 3426521..3b75162 100644
44211 --- a/fs/fuse/cuse.c
44212 +++ b/fs/fuse/cuse.c
44213 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
44214 INIT_LIST_HEAD(&cuse_conntbl[i]);
44215
44216 /* inherit and extend fuse_dev_operations */
44217 - cuse_channel_fops = fuse_dev_operations;
44218 - cuse_channel_fops.owner = THIS_MODULE;
44219 - cuse_channel_fops.open = cuse_channel_open;
44220 - cuse_channel_fops.release = cuse_channel_release;
44221 + pax_open_kernel();
44222 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44223 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44224 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
44225 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
44226 + pax_close_kernel();
44227
44228 cuse_class = class_create(THIS_MODULE, "cuse");
44229 if (IS_ERR(cuse_class))
44230 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44231 index 2aaf3ea..8e50863 100644
44232 --- a/fs/fuse/dev.c
44233 +++ b/fs/fuse/dev.c
44234 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44235 ret = 0;
44236 pipe_lock(pipe);
44237
44238 - if (!pipe->readers) {
44239 + if (!atomic_read(&pipe->readers)) {
44240 send_sig(SIGPIPE, current, 0);
44241 if (!ret)
44242 ret = -EPIPE;
44243 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44244 index 9f63e49..d8a64c0 100644
44245 --- a/fs/fuse/dir.c
44246 +++ b/fs/fuse/dir.c
44247 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44248 return link;
44249 }
44250
44251 -static void free_link(char *link)
44252 +static void free_link(const char *link)
44253 {
44254 if (!IS_ERR(link))
44255 free_page((unsigned long) link);
44256 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44257 index cfd4959..a780959 100644
44258 --- a/fs/gfs2/inode.c
44259 +++ b/fs/gfs2/inode.c
44260 @@ -1490,7 +1490,7 @@ out:
44261
44262 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44263 {
44264 - char *s = nd_get_link(nd);
44265 + const char *s = nd_get_link(nd);
44266 if (!IS_ERR(s))
44267 kfree(s);
44268 }
44269 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44270 index 0be5a78..9cfb853 100644
44271 --- a/fs/hugetlbfs/inode.c
44272 +++ b/fs/hugetlbfs/inode.c
44273 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44274 .kill_sb = kill_litter_super,
44275 };
44276
44277 -static struct vfsmount *hugetlbfs_vfsmount;
44278 +struct vfsmount *hugetlbfs_vfsmount;
44279
44280 static int can_do_hugetlb_shm(void)
44281 {
44282 diff --git a/fs/inode.c b/fs/inode.c
44283 index ee4e66b..0451521 100644
44284 --- a/fs/inode.c
44285 +++ b/fs/inode.c
44286 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44287
44288 #ifdef CONFIG_SMP
44289 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44290 - static atomic_t shared_last_ino;
44291 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44292 + static atomic_unchecked_t shared_last_ino;
44293 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44294
44295 res = next - LAST_INO_BATCH;
44296 }
44297 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44298 index e513f19..2ab1351 100644
44299 --- a/fs/jffs2/erase.c
44300 +++ b/fs/jffs2/erase.c
44301 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44302 struct jffs2_unknown_node marker = {
44303 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44304 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44305 - .totlen = cpu_to_je32(c->cleanmarker_size)
44306 + .totlen = cpu_to_je32(c->cleanmarker_size),
44307 + .hdr_crc = cpu_to_je32(0)
44308 };
44309
44310 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44311 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44312 index b09e51d..e482afa 100644
44313 --- a/fs/jffs2/wbuf.c
44314 +++ b/fs/jffs2/wbuf.c
44315 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44316 {
44317 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44318 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44319 - .totlen = constant_cpu_to_je32(8)
44320 + .totlen = constant_cpu_to_je32(8),
44321 + .hdr_crc = constant_cpu_to_je32(0)
44322 };
44323
44324 /*
44325 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44326 index a44eff0..462e07d 100644
44327 --- a/fs/jfs/super.c
44328 +++ b/fs/jfs/super.c
44329 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44330
44331 jfs_inode_cachep =
44332 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44333 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44334 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44335 init_once);
44336 if (jfs_inode_cachep == NULL)
44337 return -ENOMEM;
44338 diff --git a/fs/libfs.c b/fs/libfs.c
44339 index f6d411e..e82a08d 100644
44340 --- a/fs/libfs.c
44341 +++ b/fs/libfs.c
44342 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44343
44344 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44345 struct dentry *next;
44346 + char d_name[sizeof(next->d_iname)];
44347 + const unsigned char *name;
44348 +
44349 next = list_entry(p, struct dentry, d_u.d_child);
44350 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44351 if (!simple_positive(next)) {
44352 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44353
44354 spin_unlock(&next->d_lock);
44355 spin_unlock(&dentry->d_lock);
44356 - if (filldir(dirent, next->d_name.name,
44357 + name = next->d_name.name;
44358 + if (name == next->d_iname) {
44359 + memcpy(d_name, name, next->d_name.len);
44360 + name = d_name;
44361 + }
44362 + if (filldir(dirent, name,
44363 next->d_name.len, filp->f_pos,
44364 next->d_inode->i_ino,
44365 dt_type(next->d_inode)) < 0)
44366 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44367 index 8392cb8..80d6193 100644
44368 --- a/fs/lockd/clntproc.c
44369 +++ b/fs/lockd/clntproc.c
44370 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44371 /*
44372 * Cookie counter for NLM requests
44373 */
44374 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44375 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44376
44377 void nlmclnt_next_cookie(struct nlm_cookie *c)
44378 {
44379 - u32 cookie = atomic_inc_return(&nlm_cookie);
44380 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44381
44382 memcpy(c->data, &cookie, 4);
44383 c->len=4;
44384 diff --git a/fs/locks.c b/fs/locks.c
44385 index 637694b..f84a121 100644
44386 --- a/fs/locks.c
44387 +++ b/fs/locks.c
44388 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44389 return;
44390
44391 if (filp->f_op && filp->f_op->flock) {
44392 - struct file_lock fl = {
44393 + struct file_lock flock = {
44394 .fl_pid = current->tgid,
44395 .fl_file = filp,
44396 .fl_flags = FL_FLOCK,
44397 .fl_type = F_UNLCK,
44398 .fl_end = OFFSET_MAX,
44399 };
44400 - filp->f_op->flock(filp, F_SETLKW, &fl);
44401 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44402 - fl.fl_ops->fl_release_private(&fl);
44403 + filp->f_op->flock(filp, F_SETLKW, &flock);
44404 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44405 + flock.fl_ops->fl_release_private(&flock);
44406 }
44407
44408 lock_flocks();
44409 diff --git a/fs/namei.c b/fs/namei.c
44410 index 5008f01..90328a7 100644
44411 --- a/fs/namei.c
44412 +++ b/fs/namei.c
44413 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44414 if (ret != -EACCES)
44415 return ret;
44416
44417 +#ifdef CONFIG_GRKERNSEC
44418 + /* we'll block if we have to log due to a denied capability use */
44419 + if (mask & MAY_NOT_BLOCK)
44420 + return -ECHILD;
44421 +#endif
44422 +
44423 if (S_ISDIR(inode->i_mode)) {
44424 /* DACs are overridable for directories */
44425 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44426 - return 0;
44427 if (!(mask & MAY_WRITE))
44428 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44429 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44430 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44431 return 0;
44432 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44433 + return 0;
44434 return -EACCES;
44435 }
44436 /*
44437 + * Searching includes executable on directories, else just read.
44438 + */
44439 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44440 + if (mask == MAY_READ)
44441 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44442 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44443 + return 0;
44444 +
44445 + /*
44446 * Read/write DACs are always overridable.
44447 * Executable DACs are overridable when there is
44448 * at least one exec bit set.
44449 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44450 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44451 return 0;
44452
44453 - /*
44454 - * Searching includes executable on directories, else just read.
44455 - */
44456 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44457 - if (mask == MAY_READ)
44458 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44459 - return 0;
44460 -
44461 return -EACCES;
44462 }
44463
44464 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44465 return error;
44466 }
44467
44468 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44469 + dentry->d_inode, dentry, nd->path.mnt)) {
44470 + error = -EACCES;
44471 + *p = ERR_PTR(error); /* no ->put_link(), please */
44472 + path_put(&nd->path);
44473 + return error;
44474 + }
44475 +
44476 nd->last_type = LAST_BIND;
44477 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44478 error = PTR_ERR(*p);
44479 if (!IS_ERR(*p)) {
44480 - char *s = nd_get_link(nd);
44481 + const char *s = nd_get_link(nd);
44482 error = 0;
44483 if (s)
44484 error = __vfs_follow_link(nd, s);
44485 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44486 if (!err)
44487 err = complete_walk(nd);
44488
44489 + if (!(nd->flags & LOOKUP_PARENT)) {
44490 +#ifdef CONFIG_GRKERNSEC
44491 + if (flags & LOOKUP_RCU) {
44492 + if (!err)
44493 + path_put(&nd->path);
44494 + err = -ECHILD;
44495 + } else
44496 +#endif
44497 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44498 + if (!err)
44499 + path_put(&nd->path);
44500 + err = -ENOENT;
44501 + }
44502 + }
44503 +
44504 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44505 if (!nd->inode->i_op->lookup) {
44506 path_put(&nd->path);
44507 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44508 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44509
44510 if (likely(!retval)) {
44511 + if (*name != '/' && nd->path.dentry && nd->inode) {
44512 +#ifdef CONFIG_GRKERNSEC
44513 + if (flags & LOOKUP_RCU)
44514 + return -ECHILD;
44515 +#endif
44516 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44517 + return -ENOENT;
44518 + }
44519 +
44520 if (unlikely(!audit_dummy_context())) {
44521 if (nd->path.dentry && nd->inode)
44522 audit_inode(name, nd->path.dentry);
44523 @@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44524 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44525 return -EPERM;
44526
44527 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44528 + return -EPERM;
44529 + if (gr_handle_rawio(inode))
44530 + return -EPERM;
44531 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44532 + return -EACCES;
44533 +
44534 return 0;
44535 }
44536
44537 @@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44538 error = complete_walk(nd);
44539 if (error)
44540 return ERR_PTR(error);
44541 +#ifdef CONFIG_GRKERNSEC
44542 + if (nd->flags & LOOKUP_RCU) {
44543 + error = -ECHILD;
44544 + goto exit;
44545 + }
44546 +#endif
44547 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44548 + error = -ENOENT;
44549 + goto exit;
44550 + }
44551 audit_inode(pathname, nd->path.dentry);
44552 if (open_flag & O_CREAT) {
44553 error = -EISDIR;
44554 @@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44555 error = complete_walk(nd);
44556 if (error)
44557 return ERR_PTR(error);
44558 +#ifdef CONFIG_GRKERNSEC
44559 + if (nd->flags & LOOKUP_RCU) {
44560 + error = -ECHILD;
44561 + goto exit;
44562 + }
44563 +#endif
44564 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44565 + error = -ENOENT;
44566 + goto exit;
44567 + }
44568 audit_inode(pathname, dir);
44569 goto ok;
44570 }
44571 @@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44572 error = complete_walk(nd);
44573 if (error)
44574 return ERR_PTR(-ECHILD);
44575 +#ifdef CONFIG_GRKERNSEC
44576 + if (nd->flags & LOOKUP_RCU) {
44577 + error = -ECHILD;
44578 + goto exit;
44579 + }
44580 +#endif
44581 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44582 + error = -ENOENT;
44583 + goto exit;
44584 + }
44585
44586 error = -ENOTDIR;
44587 if (nd->flags & LOOKUP_DIRECTORY) {
44588 @@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44589 /* Negative dentry, just create the file */
44590 if (!dentry->d_inode) {
44591 int mode = op->mode;
44592 +
44593 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44594 + error = -EACCES;
44595 + goto exit_mutex_unlock;
44596 + }
44597 +
44598 if (!IS_POSIXACL(dir->d_inode))
44599 mode &= ~current_umask();
44600 /*
44601 @@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44602 error = vfs_create(dir->d_inode, dentry, mode, nd);
44603 if (error)
44604 goto exit_mutex_unlock;
44605 + else
44606 + gr_handle_create(path->dentry, path->mnt);
44607 mutex_unlock(&dir->d_inode->i_mutex);
44608 dput(nd->path.dentry);
44609 nd->path.dentry = dentry;
44610 @@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44611 /*
44612 * It already exists.
44613 */
44614 +
44615 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44616 + error = -ENOENT;
44617 + goto exit_mutex_unlock;
44618 + }
44619 +
44620 + /* only check if O_CREAT is specified, all other checks need to go
44621 + into may_open */
44622 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44623 + error = -EACCES;
44624 + goto exit_mutex_unlock;
44625 + }
44626 +
44627 mutex_unlock(&dir->d_inode->i_mutex);
44628 audit_inode(pathname, path->dentry);
44629
44630 @@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44631 *path = nd.path;
44632 return dentry;
44633 eexist:
44634 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44635 + dput(dentry);
44636 + dentry = ERR_PTR(-ENOENT);
44637 + goto fail;
44638 + }
44639 dput(dentry);
44640 dentry = ERR_PTR(-EEXIST);
44641 fail:
44642 @@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44643 }
44644 EXPORT_SYMBOL(user_path_create);
44645
44646 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44647 +{
44648 + char *tmp = getname(pathname);
44649 + struct dentry *res;
44650 + if (IS_ERR(tmp))
44651 + return ERR_CAST(tmp);
44652 + res = kern_path_create(dfd, tmp, path, is_dir);
44653 + if (IS_ERR(res))
44654 + putname(tmp);
44655 + else
44656 + *to = tmp;
44657 + return res;
44658 +}
44659 +
44660 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44661 {
44662 int error = may_create(dir, dentry);
44663 @@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44664 error = mnt_want_write(path.mnt);
44665 if (error)
44666 goto out_dput;
44667 +
44668 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44669 + error = -EPERM;
44670 + goto out_drop_write;
44671 + }
44672 +
44673 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44674 + error = -EACCES;
44675 + goto out_drop_write;
44676 + }
44677 +
44678 error = security_path_mknod(&path, dentry, mode, dev);
44679 if (error)
44680 goto out_drop_write;
44681 @@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44682 }
44683 out_drop_write:
44684 mnt_drop_write(path.mnt);
44685 +
44686 + if (!error)
44687 + gr_handle_create(dentry, path.mnt);
44688 out_dput:
44689 dput(dentry);
44690 mutex_unlock(&path.dentry->d_inode->i_mutex);
44691 @@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44692 error = mnt_want_write(path.mnt);
44693 if (error)
44694 goto out_dput;
44695 +
44696 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44697 + error = -EACCES;
44698 + goto out_drop_write;
44699 + }
44700 +
44701 error = security_path_mkdir(&path, dentry, mode);
44702 if (error)
44703 goto out_drop_write;
44704 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44705 out_drop_write:
44706 mnt_drop_write(path.mnt);
44707 +
44708 + if (!error)
44709 + gr_handle_create(dentry, path.mnt);
44710 out_dput:
44711 dput(dentry);
44712 mutex_unlock(&path.dentry->d_inode->i_mutex);
44713 @@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44714 char * name;
44715 struct dentry *dentry;
44716 struct nameidata nd;
44717 + ino_t saved_ino = 0;
44718 + dev_t saved_dev = 0;
44719
44720 error = user_path_parent(dfd, pathname, &nd, &name);
44721 if (error)
44722 @@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44723 error = -ENOENT;
44724 goto exit3;
44725 }
44726 +
44727 + saved_ino = dentry->d_inode->i_ino;
44728 + saved_dev = gr_get_dev_from_dentry(dentry);
44729 +
44730 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44731 + error = -EACCES;
44732 + goto exit3;
44733 + }
44734 +
44735 error = mnt_want_write(nd.path.mnt);
44736 if (error)
44737 goto exit3;
44738 @@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44739 if (error)
44740 goto exit4;
44741 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44742 + if (!error && (saved_dev || saved_ino))
44743 + gr_handle_delete(saved_ino, saved_dev);
44744 exit4:
44745 mnt_drop_write(nd.path.mnt);
44746 exit3:
44747 @@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44748 struct dentry *dentry;
44749 struct nameidata nd;
44750 struct inode *inode = NULL;
44751 + ino_t saved_ino = 0;
44752 + dev_t saved_dev = 0;
44753
44754 error = user_path_parent(dfd, pathname, &nd, &name);
44755 if (error)
44756 @@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44757 if (!inode)
44758 goto slashes;
44759 ihold(inode);
44760 +
44761 + if (inode->i_nlink <= 1) {
44762 + saved_ino = inode->i_ino;
44763 + saved_dev = gr_get_dev_from_dentry(dentry);
44764 + }
44765 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44766 + error = -EACCES;
44767 + goto exit2;
44768 + }
44769 +
44770 error = mnt_want_write(nd.path.mnt);
44771 if (error)
44772 goto exit2;
44773 @@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44774 if (error)
44775 goto exit3;
44776 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44777 + if (!error && (saved_ino || saved_dev))
44778 + gr_handle_delete(saved_ino, saved_dev);
44779 exit3:
44780 mnt_drop_write(nd.path.mnt);
44781 exit2:
44782 @@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44783 error = mnt_want_write(path.mnt);
44784 if (error)
44785 goto out_dput;
44786 +
44787 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44788 + error = -EACCES;
44789 + goto out_drop_write;
44790 + }
44791 +
44792 error = security_path_symlink(&path, dentry, from);
44793 if (error)
44794 goto out_drop_write;
44795 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44796 + if (!error)
44797 + gr_handle_create(dentry, path.mnt);
44798 out_drop_write:
44799 mnt_drop_write(path.mnt);
44800 out_dput:
44801 @@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44802 {
44803 struct dentry *new_dentry;
44804 struct path old_path, new_path;
44805 + char *to = NULL;
44806 int how = 0;
44807 int error;
44808
44809 @@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44810 if (error)
44811 return error;
44812
44813 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44814 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44815 error = PTR_ERR(new_dentry);
44816 if (IS_ERR(new_dentry))
44817 goto out;
44818 @@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44819 error = mnt_want_write(new_path.mnt);
44820 if (error)
44821 goto out_dput;
44822 +
44823 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44824 + old_path.dentry->d_inode,
44825 + old_path.dentry->d_inode->i_mode, to)) {
44826 + error = -EACCES;
44827 + goto out_drop_write;
44828 + }
44829 +
44830 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44831 + old_path.dentry, old_path.mnt, to)) {
44832 + error = -EACCES;
44833 + goto out_drop_write;
44834 + }
44835 +
44836 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44837 if (error)
44838 goto out_drop_write;
44839 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44840 + if (!error)
44841 + gr_handle_create(new_dentry, new_path.mnt);
44842 out_drop_write:
44843 mnt_drop_write(new_path.mnt);
44844 out_dput:
44845 + putname(to);
44846 dput(new_dentry);
44847 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44848 path_put(&new_path);
44849 @@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44850 if (new_dentry == trap)
44851 goto exit5;
44852
44853 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44854 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44855 + to);
44856 + if (error)
44857 + goto exit5;
44858 +
44859 error = mnt_want_write(oldnd.path.mnt);
44860 if (error)
44861 goto exit5;
44862 @@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44863 goto exit6;
44864 error = vfs_rename(old_dir->d_inode, old_dentry,
44865 new_dir->d_inode, new_dentry);
44866 + if (!error)
44867 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44868 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44869 exit6:
44870 mnt_drop_write(oldnd.path.mnt);
44871 exit5:
44872 @@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44873
44874 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44875 {
44876 + char tmpbuf[64];
44877 + const char *newlink;
44878 int len;
44879
44880 len = PTR_ERR(link);
44881 @@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44882 len = strlen(link);
44883 if (len > (unsigned) buflen)
44884 len = buflen;
44885 - if (copy_to_user(buffer, link, len))
44886 +
44887 + if (len < sizeof(tmpbuf)) {
44888 + memcpy(tmpbuf, link, len);
44889 + newlink = tmpbuf;
44890 + } else
44891 + newlink = link;
44892 +
44893 + if (copy_to_user(buffer, newlink, len))
44894 len = -EFAULT;
44895 out:
44896 return len;
44897 diff --git a/fs/namespace.c b/fs/namespace.c
44898 index cfc6d44..b4632a5 100644
44899 --- a/fs/namespace.c
44900 +++ b/fs/namespace.c
44901 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44902 if (!(sb->s_flags & MS_RDONLY))
44903 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44904 up_write(&sb->s_umount);
44905 +
44906 + gr_log_remount(mnt->mnt_devname, retval);
44907 +
44908 return retval;
44909 }
44910
44911 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44912 br_write_unlock(vfsmount_lock);
44913 up_write(&namespace_sem);
44914 release_mounts(&umount_list);
44915 +
44916 + gr_log_unmount(mnt->mnt_devname, retval);
44917 +
44918 return retval;
44919 }
44920
44921 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44922 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44923 MS_STRICTATIME);
44924
44925 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44926 + retval = -EPERM;
44927 + goto dput_out;
44928 + }
44929 +
44930 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44931 + retval = -EPERM;
44932 + goto dput_out;
44933 + }
44934 +
44935 if (flags & MS_REMOUNT)
44936 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44937 data_page);
44938 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44939 dev_name, data_page);
44940 dput_out:
44941 path_put(&path);
44942 +
44943 + gr_log_mount(dev_name, dir_name, retval);
44944 +
44945 return retval;
44946 }
44947
44948 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44949 if (error)
44950 goto out2;
44951
44952 + if (gr_handle_chroot_pivot()) {
44953 + error = -EPERM;
44954 + goto out2;
44955 + }
44956 +
44957 get_fs_root(current->fs, &root);
44958 error = lock_mount(&old);
44959 if (error)
44960 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44961 index 3db6b82..a57597e 100644
44962 --- a/fs/nfs/blocklayout/blocklayout.c
44963 +++ b/fs/nfs/blocklayout/blocklayout.c
44964 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44965 */
44966 struct parallel_io {
44967 struct kref refcnt;
44968 - struct rpc_call_ops call_ops;
44969 + rpc_call_ops_no_const call_ops;
44970 void (*pnfs_callback) (void *data);
44971 void *data;
44972 };
44973 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44974 index 50a15fa..ca113f9 100644
44975 --- a/fs/nfs/inode.c
44976 +++ b/fs/nfs/inode.c
44977 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44978 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44979 nfsi->attrtimeo_timestamp = jiffies;
44980
44981 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44982 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44983 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44984 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44985 else
44986 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44987 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44988 }
44989
44990 -static atomic_long_t nfs_attr_generation_counter;
44991 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44992
44993 static unsigned long nfs_read_attr_generation_counter(void)
44994 {
44995 - return atomic_long_read(&nfs_attr_generation_counter);
44996 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44997 }
44998
44999 unsigned long nfs_inc_attr_generation_counter(void)
45000 {
45001 - return atomic_long_inc_return(&nfs_attr_generation_counter);
45002 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
45003 }
45004
45005 void nfs_fattr_init(struct nfs_fattr *fattr)
45006 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
45007 index 7a2e442..8e544cc 100644
45008 --- a/fs/nfsd/vfs.c
45009 +++ b/fs/nfsd/vfs.c
45010 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45011 } else {
45012 oldfs = get_fs();
45013 set_fs(KERNEL_DS);
45014 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
45015 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
45016 set_fs(oldfs);
45017 }
45018
45019 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45020
45021 /* Write the data. */
45022 oldfs = get_fs(); set_fs(KERNEL_DS);
45023 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
45024 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
45025 set_fs(oldfs);
45026 if (host_err < 0)
45027 goto out_nfserr;
45028 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
45029 */
45030
45031 oldfs = get_fs(); set_fs(KERNEL_DS);
45032 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
45033 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
45034 set_fs(oldfs);
45035
45036 if (host_err < 0)
45037 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
45038 index 9fde1c0..14e8827 100644
45039 --- a/fs/notify/fanotify/fanotify_user.c
45040 +++ b/fs/notify/fanotify/fanotify_user.c
45041 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
45042 goto out_close_fd;
45043
45044 ret = -EFAULT;
45045 - if (copy_to_user(buf, &fanotify_event_metadata,
45046 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45047 + copy_to_user(buf, &fanotify_event_metadata,
45048 fanotify_event_metadata.event_len))
45049 goto out_kill_access_response;
45050
45051 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
45052 index ee18815..7aa5d01 100644
45053 --- a/fs/notify/notification.c
45054 +++ b/fs/notify/notification.c
45055 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45056 * get set to 0 so it will never get 'freed'
45057 */
45058 static struct fsnotify_event *q_overflow_event;
45059 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45060 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45061
45062 /**
45063 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45064 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45065 */
45066 u32 fsnotify_get_cookie(void)
45067 {
45068 - return atomic_inc_return(&fsnotify_sync_cookie);
45069 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45070 }
45071 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45072
45073 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45074 index 99e3610..02c1068 100644
45075 --- a/fs/ntfs/dir.c
45076 +++ b/fs/ntfs/dir.c
45077 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
45078 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45079 ~(s64)(ndir->itype.index.block_size - 1)));
45080 /* Bounds checks. */
45081 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45082 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45083 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45084 "inode 0x%lx or driver bug.", vdir->i_ino);
45085 goto err_out;
45086 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45087 index c587e2d..3641eaa 100644
45088 --- a/fs/ntfs/file.c
45089 +++ b/fs/ntfs/file.c
45090 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45091 #endif /* NTFS_RW */
45092 };
45093
45094 -const struct file_operations ntfs_empty_file_ops = {};
45095 +const struct file_operations ntfs_empty_file_ops __read_only;
45096
45097 -const struct inode_operations ntfs_empty_inode_ops = {};
45098 +const struct inode_operations ntfs_empty_inode_ops __read_only;
45099 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45100 index 210c352..a174f83 100644
45101 --- a/fs/ocfs2/localalloc.c
45102 +++ b/fs/ocfs2/localalloc.c
45103 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45104 goto bail;
45105 }
45106
45107 - atomic_inc(&osb->alloc_stats.moves);
45108 + atomic_inc_unchecked(&osb->alloc_stats.moves);
45109
45110 bail:
45111 if (handle)
45112 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45113 index d355e6e..578d905 100644
45114 --- a/fs/ocfs2/ocfs2.h
45115 +++ b/fs/ocfs2/ocfs2.h
45116 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
45117
45118 struct ocfs2_alloc_stats
45119 {
45120 - atomic_t moves;
45121 - atomic_t local_data;
45122 - atomic_t bitmap_data;
45123 - atomic_t bg_allocs;
45124 - atomic_t bg_extends;
45125 + atomic_unchecked_t moves;
45126 + atomic_unchecked_t local_data;
45127 + atomic_unchecked_t bitmap_data;
45128 + atomic_unchecked_t bg_allocs;
45129 + atomic_unchecked_t bg_extends;
45130 };
45131
45132 enum ocfs2_local_alloc_state
45133 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45134 index ba5d97e..c77db25 100644
45135 --- a/fs/ocfs2/suballoc.c
45136 +++ b/fs/ocfs2/suballoc.c
45137 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45138 mlog_errno(status);
45139 goto bail;
45140 }
45141 - atomic_inc(&osb->alloc_stats.bg_extends);
45142 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45143
45144 /* You should never ask for this much metadata */
45145 BUG_ON(bits_wanted >
45146 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45147 mlog_errno(status);
45148 goto bail;
45149 }
45150 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45151 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45152
45153 *suballoc_loc = res.sr_bg_blkno;
45154 *suballoc_bit_start = res.sr_bit_offset;
45155 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45156 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45157 res->sr_bits);
45158
45159 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45160 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45161
45162 BUG_ON(res->sr_bits != 1);
45163
45164 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45165 mlog_errno(status);
45166 goto bail;
45167 }
45168 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45169 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45170
45171 BUG_ON(res.sr_bits != 1);
45172
45173 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45174 cluster_start,
45175 num_clusters);
45176 if (!status)
45177 - atomic_inc(&osb->alloc_stats.local_data);
45178 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
45179 } else {
45180 if (min_clusters > (osb->bitmap_cpg - 1)) {
45181 /* The only paths asking for contiguousness
45182 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45183 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45184 res.sr_bg_blkno,
45185 res.sr_bit_offset);
45186 - atomic_inc(&osb->alloc_stats.bitmap_data);
45187 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45188 *num_clusters = res.sr_bits;
45189 }
45190 }
45191 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45192 index 4994f8b..eaab8eb 100644
45193 --- a/fs/ocfs2/super.c
45194 +++ b/fs/ocfs2/super.c
45195 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45196 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45197 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45198 "Stats",
45199 - atomic_read(&osb->alloc_stats.bitmap_data),
45200 - atomic_read(&osb->alloc_stats.local_data),
45201 - atomic_read(&osb->alloc_stats.bg_allocs),
45202 - atomic_read(&osb->alloc_stats.moves),
45203 - atomic_read(&osb->alloc_stats.bg_extends));
45204 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45205 + atomic_read_unchecked(&osb->alloc_stats.local_data),
45206 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45207 + atomic_read_unchecked(&osb->alloc_stats.moves),
45208 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45209
45210 out += snprintf(buf + out, len - out,
45211 "%10s => State: %u Descriptor: %llu Size: %u bits "
45212 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45213 spin_lock_init(&osb->osb_xattr_lock);
45214 ocfs2_init_steal_slots(osb);
45215
45216 - atomic_set(&osb->alloc_stats.moves, 0);
45217 - atomic_set(&osb->alloc_stats.local_data, 0);
45218 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
45219 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
45220 - atomic_set(&osb->alloc_stats.bg_extends, 0);
45221 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45222 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45223 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45224 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45225 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45226
45227 /* Copy the blockcheck stats from the superblock probe */
45228 osb->osb_ecc_stats = *stats;
45229 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45230 index 5d22872..523db20 100644
45231 --- a/fs/ocfs2/symlink.c
45232 +++ b/fs/ocfs2/symlink.c
45233 @@ -142,7 +142,7 @@ bail:
45234
45235 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45236 {
45237 - char *link = nd_get_link(nd);
45238 + const char *link = nd_get_link(nd);
45239 if (!IS_ERR(link))
45240 kfree(link);
45241 }
45242 diff --git a/fs/open.c b/fs/open.c
45243 index 22c41b5..78894cf 100644
45244 --- a/fs/open.c
45245 +++ b/fs/open.c
45246 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45247 error = locks_verify_truncate(inode, NULL, length);
45248 if (!error)
45249 error = security_path_truncate(&path);
45250 +
45251 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45252 + error = -EACCES;
45253 +
45254 if (!error)
45255 error = do_truncate(path.dentry, length, 0, NULL);
45256
45257 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45258 if (__mnt_is_readonly(path.mnt))
45259 res = -EROFS;
45260
45261 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45262 + res = -EACCES;
45263 +
45264 out_path_release:
45265 path_put(&path);
45266 out:
45267 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45268 if (error)
45269 goto dput_and_out;
45270
45271 + gr_log_chdir(path.dentry, path.mnt);
45272 +
45273 set_fs_pwd(current->fs, &path);
45274
45275 dput_and_out:
45276 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45277 goto out_putf;
45278
45279 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45280 +
45281 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45282 + error = -EPERM;
45283 +
45284 + if (!error)
45285 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45286 +
45287 if (!error)
45288 set_fs_pwd(current->fs, &file->f_path);
45289 out_putf:
45290 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45291 if (error)
45292 goto dput_and_out;
45293
45294 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45295 + goto dput_and_out;
45296 +
45297 set_fs_root(current->fs, &path);
45298 +
45299 + gr_handle_chroot_chdir(&path);
45300 +
45301 error = 0;
45302 dput_and_out:
45303 path_put(&path);
45304 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45305 if (error)
45306 return error;
45307 mutex_lock(&inode->i_mutex);
45308 +
45309 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
45310 + error = -EACCES;
45311 + goto out_unlock;
45312 + }
45313 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45314 + error = -EACCES;
45315 + goto out_unlock;
45316 + }
45317 +
45318 error = security_path_chmod(path->dentry, path->mnt, mode);
45319 if (error)
45320 goto out_unlock;
45321 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45322 int error;
45323 struct iattr newattrs;
45324
45325 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45326 + return -EACCES;
45327 +
45328 newattrs.ia_valid = ATTR_CTIME;
45329 if (user != (uid_t) -1) {
45330 newattrs.ia_valid |= ATTR_UID;
45331 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45332 index 6296b40..417c00f 100644
45333 --- a/fs/partitions/efi.c
45334 +++ b/fs/partitions/efi.c
45335 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45336 if (!gpt)
45337 return NULL;
45338
45339 + if (!le32_to_cpu(gpt->num_partition_entries))
45340 + return NULL;
45341 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45342 + if (!pte)
45343 + return NULL;
45344 +
45345 count = le32_to_cpu(gpt->num_partition_entries) *
45346 le32_to_cpu(gpt->sizeof_partition_entry);
45347 - if (!count)
45348 - return NULL;
45349 - pte = kzalloc(count, GFP_KERNEL);
45350 - if (!pte)
45351 - return NULL;
45352 -
45353 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45354 (u8 *) pte,
45355 count) < count) {
45356 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45357 index bd8ae78..539d250 100644
45358 --- a/fs/partitions/ldm.c
45359 +++ b/fs/partitions/ldm.c
45360 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45361 goto found;
45362 }
45363
45364 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45365 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45366 if (!f) {
45367 ldm_crit ("Out of memory.");
45368 return false;
45369 diff --git a/fs/pipe.c b/fs/pipe.c
45370 index 4065f07..68c0706 100644
45371 --- a/fs/pipe.c
45372 +++ b/fs/pipe.c
45373 @@ -420,9 +420,9 @@ redo:
45374 }
45375 if (bufs) /* More to do? */
45376 continue;
45377 - if (!pipe->writers)
45378 + if (!atomic_read(&pipe->writers))
45379 break;
45380 - if (!pipe->waiting_writers) {
45381 + if (!atomic_read(&pipe->waiting_writers)) {
45382 /* syscall merging: Usually we must not sleep
45383 * if O_NONBLOCK is set, or if we got some data.
45384 * But if a writer sleeps in kernel space, then
45385 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45386 mutex_lock(&inode->i_mutex);
45387 pipe = inode->i_pipe;
45388
45389 - if (!pipe->readers) {
45390 + if (!atomic_read(&pipe->readers)) {
45391 send_sig(SIGPIPE, current, 0);
45392 ret = -EPIPE;
45393 goto out;
45394 @@ -530,7 +530,7 @@ redo1:
45395 for (;;) {
45396 int bufs;
45397
45398 - if (!pipe->readers) {
45399 + if (!atomic_read(&pipe->readers)) {
45400 send_sig(SIGPIPE, current, 0);
45401 if (!ret)
45402 ret = -EPIPE;
45403 @@ -616,9 +616,9 @@ redo2:
45404 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45405 do_wakeup = 0;
45406 }
45407 - pipe->waiting_writers++;
45408 + atomic_inc(&pipe->waiting_writers);
45409 pipe_wait(pipe);
45410 - pipe->waiting_writers--;
45411 + atomic_dec(&pipe->waiting_writers);
45412 }
45413 out:
45414 mutex_unlock(&inode->i_mutex);
45415 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45416 mask = 0;
45417 if (filp->f_mode & FMODE_READ) {
45418 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45419 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45420 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45421 mask |= POLLHUP;
45422 }
45423
45424 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45425 * Most Unices do not set POLLERR for FIFOs but on Linux they
45426 * behave exactly like pipes for poll().
45427 */
45428 - if (!pipe->readers)
45429 + if (!atomic_read(&pipe->readers))
45430 mask |= POLLERR;
45431 }
45432
45433 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45434
45435 mutex_lock(&inode->i_mutex);
45436 pipe = inode->i_pipe;
45437 - pipe->readers -= decr;
45438 - pipe->writers -= decw;
45439 + atomic_sub(decr, &pipe->readers);
45440 + atomic_sub(decw, &pipe->writers);
45441
45442 - if (!pipe->readers && !pipe->writers) {
45443 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45444 free_pipe_info(inode);
45445 } else {
45446 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45447 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45448
45449 if (inode->i_pipe) {
45450 ret = 0;
45451 - inode->i_pipe->readers++;
45452 + atomic_inc(&inode->i_pipe->readers);
45453 }
45454
45455 mutex_unlock(&inode->i_mutex);
45456 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45457
45458 if (inode->i_pipe) {
45459 ret = 0;
45460 - inode->i_pipe->writers++;
45461 + atomic_inc(&inode->i_pipe->writers);
45462 }
45463
45464 mutex_unlock(&inode->i_mutex);
45465 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45466 if (inode->i_pipe) {
45467 ret = 0;
45468 if (filp->f_mode & FMODE_READ)
45469 - inode->i_pipe->readers++;
45470 + atomic_inc(&inode->i_pipe->readers);
45471 if (filp->f_mode & FMODE_WRITE)
45472 - inode->i_pipe->writers++;
45473 + atomic_inc(&inode->i_pipe->writers);
45474 }
45475
45476 mutex_unlock(&inode->i_mutex);
45477 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45478 inode->i_pipe = NULL;
45479 }
45480
45481 -static struct vfsmount *pipe_mnt __read_mostly;
45482 +struct vfsmount *pipe_mnt __read_mostly;
45483
45484 /*
45485 * pipefs_dname() is called from d_path().
45486 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45487 goto fail_iput;
45488 inode->i_pipe = pipe;
45489
45490 - pipe->readers = pipe->writers = 1;
45491 + atomic_set(&pipe->readers, 1);
45492 + atomic_set(&pipe->writers, 1);
45493 inode->i_fop = &rdwr_pipefifo_fops;
45494
45495 /*
45496 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45497 index 15af622..0e9f4467 100644
45498 --- a/fs/proc/Kconfig
45499 +++ b/fs/proc/Kconfig
45500 @@ -30,12 +30,12 @@ config PROC_FS
45501
45502 config PROC_KCORE
45503 bool "/proc/kcore support" if !ARM
45504 - depends on PROC_FS && MMU
45505 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45506
45507 config PROC_VMCORE
45508 bool "/proc/vmcore support"
45509 - depends on PROC_FS && CRASH_DUMP
45510 - default y
45511 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45512 + default n
45513 help
45514 Exports the dump image of crashed kernel in ELF format.
45515
45516 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45517 limited in memory.
45518
45519 config PROC_PAGE_MONITOR
45520 - default y
45521 - depends on PROC_FS && MMU
45522 + default n
45523 + depends on PROC_FS && MMU && !GRKERNSEC
45524 bool "Enable /proc page monitoring" if EXPERT
45525 help
45526 Various /proc files exist to monitor process memory utilization:
45527 diff --git a/fs/proc/array.c b/fs/proc/array.c
45528 index 3a1dafd..1456746 100644
45529 --- a/fs/proc/array.c
45530 +++ b/fs/proc/array.c
45531 @@ -60,6 +60,7 @@
45532 #include <linux/tty.h>
45533 #include <linux/string.h>
45534 #include <linux/mman.h>
45535 +#include <linux/grsecurity.h>
45536 #include <linux/proc_fs.h>
45537 #include <linux/ioport.h>
45538 #include <linux/uaccess.h>
45539 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45540 seq_putc(m, '\n');
45541 }
45542
45543 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45544 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45545 +{
45546 + if (p->mm)
45547 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45548 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45549 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45550 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45551 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45552 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45553 + else
45554 + seq_printf(m, "PaX:\t-----\n");
45555 +}
45556 +#endif
45557 +
45558 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45559 struct pid *pid, struct task_struct *task)
45560 {
45561 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45562 task_cpus_allowed(m, task);
45563 cpuset_task_status_allowed(m, task);
45564 task_context_switch_counts(m, task);
45565 +
45566 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45567 + task_pax(m, task);
45568 +#endif
45569 +
45570 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45571 + task_grsec_rbac(m, task);
45572 +#endif
45573 +
45574 return 0;
45575 }
45576
45577 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45578 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45579 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45580 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45581 +#endif
45582 +
45583 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45584 struct pid *pid, struct task_struct *task, int whole)
45585 {
45586 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45587 char tcomm[sizeof(task->comm)];
45588 unsigned long flags;
45589
45590 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45591 + if (current->exec_id != m->exec_id) {
45592 + gr_log_badprocpid("stat");
45593 + return 0;
45594 + }
45595 +#endif
45596 +
45597 state = *get_task_state(task);
45598 vsize = eip = esp = 0;
45599 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45600 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45601 gtime = task->gtime;
45602 }
45603
45604 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45605 + if (PAX_RAND_FLAGS(mm)) {
45606 + eip = 0;
45607 + esp = 0;
45608 + wchan = 0;
45609 + }
45610 +#endif
45611 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45612 + wchan = 0;
45613 + eip =0;
45614 + esp =0;
45615 +#endif
45616 +
45617 /* scale priority and nice values from timeslices to -20..20 */
45618 /* to make it look like a "normal" Unix priority/nice value */
45619 priority = task_prio(task);
45620 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45621 vsize,
45622 mm ? get_mm_rss(mm) : 0,
45623 rsslim,
45624 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45625 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45626 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45627 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45628 +#else
45629 mm ? (permitted ? mm->start_code : 1) : 0,
45630 mm ? (permitted ? mm->end_code : 1) : 0,
45631 (permitted && mm) ? mm->start_stack : 0,
45632 +#endif
45633 esp,
45634 eip,
45635 /* The signal information here is obsolete.
45636 @@ -535,6 +592,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45637 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45638 struct mm_struct *mm = get_task_mm(task);
45639
45640 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45641 + if (current->exec_id != m->exec_id) {
45642 + gr_log_badprocpid("statm");
45643 + return 0;
45644 + }
45645 +#endif
45646 +
45647 if (mm) {
45648 size = task_statm(mm, &shared, &text, &data, &resident);
45649 mmput(mm);
45650 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45651
45652 return 0;
45653 }
45654 +
45655 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45656 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45657 +{
45658 + u32 curr_ip = 0;
45659 + unsigned long flags;
45660 +
45661 + if (lock_task_sighand(task, &flags)) {
45662 + curr_ip = task->signal->curr_ip;
45663 + unlock_task_sighand(task, &flags);
45664 + }
45665 +
45666 + return sprintf(buffer, "%pI4\n", &curr_ip);
45667 +}
45668 +#endif
45669 diff --git a/fs/proc/base.c b/fs/proc/base.c
45670 index 1ace83d..f5e575d 100644
45671 --- a/fs/proc/base.c
45672 +++ b/fs/proc/base.c
45673 @@ -107,6 +107,22 @@ struct pid_entry {
45674 union proc_op op;
45675 };
45676
45677 +struct getdents_callback {
45678 + struct linux_dirent __user * current_dir;
45679 + struct linux_dirent __user * previous;
45680 + struct file * file;
45681 + int count;
45682 + int error;
45683 +};
45684 +
45685 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45686 + loff_t offset, u64 ino, unsigned int d_type)
45687 +{
45688 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45689 + buf->error = -EINVAL;
45690 + return 0;
45691 +}
45692 +
45693 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45694 .name = (NAME), \
45695 .len = sizeof(NAME) - 1, \
45696 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45697 return result;
45698 }
45699
45700 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45701 -{
45702 - struct mm_struct *mm;
45703 - int err;
45704 -
45705 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45706 - if (err)
45707 - return ERR_PTR(err);
45708 -
45709 - mm = get_task_mm(task);
45710 - if (mm && mm != current->mm &&
45711 - !ptrace_may_access(task, mode)) {
45712 - mmput(mm);
45713 - mm = ERR_PTR(-EACCES);
45714 - }
45715 - mutex_unlock(&task->signal->cred_guard_mutex);
45716 -
45717 - return mm;
45718 -}
45719 -
45720 struct mm_struct *mm_for_maps(struct task_struct *task)
45721 {
45722 return mm_access(task, PTRACE_MODE_READ);
45723 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45724 if (!mm->arg_end)
45725 goto out_mm; /* Shh! No looking before we're done */
45726
45727 + if (gr_acl_handle_procpidmem(task))
45728 + goto out_mm;
45729 +
45730 len = mm->arg_end - mm->arg_start;
45731
45732 if (len > PAGE_SIZE)
45733 @@ -256,12 +255,28 @@ out:
45734 return res;
45735 }
45736
45737 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45738 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45739 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45740 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45741 +#endif
45742 +
45743 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45744 {
45745 struct mm_struct *mm = mm_for_maps(task);
45746 int res = PTR_ERR(mm);
45747 if (mm && !IS_ERR(mm)) {
45748 unsigned int nwords = 0;
45749 +
45750 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45751 + /* allow if we're currently ptracing this task */
45752 + if (PAX_RAND_FLAGS(mm) &&
45753 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45754 + mmput(mm);
45755 + return 0;
45756 + }
45757 +#endif
45758 +
45759 do {
45760 nwords += 2;
45761 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45762 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45763 }
45764
45765
45766 -#ifdef CONFIG_KALLSYMS
45767 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45768 /*
45769 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45770 * Returns the resolved symbol. If that fails, simply return the address.
45771 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45772 mutex_unlock(&task->signal->cred_guard_mutex);
45773 }
45774
45775 -#ifdef CONFIG_STACKTRACE
45776 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45777
45778 #define MAX_STACK_TRACE_DEPTH 64
45779
45780 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45781 return count;
45782 }
45783
45784 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45785 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45786 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45787 {
45788 long nr;
45789 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45790 /************************************************************************/
45791
45792 /* permission checks */
45793 -static int proc_fd_access_allowed(struct inode *inode)
45794 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45795 {
45796 struct task_struct *task;
45797 int allowed = 0;
45798 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45799 */
45800 task = get_proc_task(inode);
45801 if (task) {
45802 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45803 + if (log)
45804 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45805 + else
45806 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45807 put_task_struct(task);
45808 }
45809 return allowed;
45810 @@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
45811 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45812 file->private_data = mm;
45813
45814 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45815 + file->f_version = current->exec_id;
45816 +#endif
45817 +
45818 return 0;
45819 }
45820
45821 @@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45822 ssize_t copied;
45823 char *page;
45824
45825 +#ifdef CONFIG_GRKERNSEC
45826 + if (write)
45827 + return -EPERM;
45828 +#endif
45829 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45830 + if (file->f_version != current->exec_id) {
45831 + gr_log_badprocpid("mem");
45832 + return 0;
45833 + }
45834 +#endif
45835 +
45836 if (!mm)
45837 return 0;
45838
45839 @@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45840 if (!task)
45841 goto out_no_task;
45842
45843 + if (gr_acl_handle_procpidmem(task))
45844 + goto out;
45845 +
45846 ret = -ENOMEM;
45847 page = (char *)__get_free_page(GFP_TEMPORARY);
45848 if (!page)
45849 @@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45850 path_put(&nd->path);
45851
45852 /* Are we allowed to snoop on the tasks file descriptors? */
45853 - if (!proc_fd_access_allowed(inode))
45854 + if (!proc_fd_access_allowed(inode,0))
45855 goto out;
45856
45857 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45858 @@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45859 struct path path;
45860
45861 /* Are we allowed to snoop on the tasks file descriptors? */
45862 - if (!proc_fd_access_allowed(inode))
45863 - goto out;
45864 + /* logging this is needed for learning on chromium to work properly,
45865 + but we don't want to flood the logs from 'ps' which does a readlink
45866 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45867 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45868 + */
45869 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45870 + if (!proc_fd_access_allowed(inode,0))
45871 + goto out;
45872 + } else {
45873 + if (!proc_fd_access_allowed(inode,1))
45874 + goto out;
45875 + }
45876
45877 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45878 if (error)
45879 @@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45880 rcu_read_lock();
45881 cred = __task_cred(task);
45882 inode->i_uid = cred->euid;
45883 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45884 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45885 +#else
45886 inode->i_gid = cred->egid;
45887 +#endif
45888 rcu_read_unlock();
45889 }
45890 security_task_to_inode(task, inode);
45891 @@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45892 struct inode *inode = dentry->d_inode;
45893 struct task_struct *task;
45894 const struct cred *cred;
45895 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45896 + const struct cred *tmpcred = current_cred();
45897 +#endif
45898
45899 generic_fillattr(inode, stat);
45900
45901 @@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45902 stat->uid = 0;
45903 stat->gid = 0;
45904 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45905 +
45906 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45907 + rcu_read_unlock();
45908 + return -ENOENT;
45909 + }
45910 +
45911 if (task) {
45912 + cred = __task_cred(task);
45913 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45914 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45915 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45916 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45917 +#endif
45918 + ) {
45919 +#endif
45920 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45921 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45922 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45923 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45924 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45925 +#endif
45926 task_dumpable(task)) {
45927 - cred = __task_cred(task);
45928 stat->uid = cred->euid;
45929 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45930 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45931 +#else
45932 stat->gid = cred->egid;
45933 +#endif
45934 }
45935 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45936 + } else {
45937 + rcu_read_unlock();
45938 + return -ENOENT;
45939 + }
45940 +#endif
45941 }
45942 rcu_read_unlock();
45943 return 0;
45944 @@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45945
45946 if (task) {
45947 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45948 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45949 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45950 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45951 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45952 +#endif
45953 task_dumpable(task)) {
45954 rcu_read_lock();
45955 cred = __task_cred(task);
45956 inode->i_uid = cred->euid;
45957 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45958 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45959 +#else
45960 inode->i_gid = cred->egid;
45961 +#endif
45962 rcu_read_unlock();
45963 } else {
45964 inode->i_uid = 0;
45965 @@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45966 int fd = proc_fd(inode);
45967
45968 if (task) {
45969 - files = get_files_struct(task);
45970 + if (!gr_acl_handle_procpidmem(task))
45971 + files = get_files_struct(task);
45972 put_task_struct(task);
45973 }
45974 if (files) {
45975 @@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
45976 */
45977 static int proc_fd_permission(struct inode *inode, int mask)
45978 {
45979 + struct task_struct *task;
45980 int rv = generic_permission(inode, mask);
45981 - if (rv == 0)
45982 - return 0;
45983 +
45984 if (task_pid(current) == proc_pid(inode))
45985 rv = 0;
45986 +
45987 + task = get_proc_task(inode);
45988 + if (task == NULL)
45989 + return rv;
45990 +
45991 + if (gr_acl_handle_procpidmem(task))
45992 + rv = -EACCES;
45993 +
45994 + put_task_struct(task);
45995 +
45996 return rv;
45997 }
45998
45999 @@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
46000 if (!task)
46001 goto out_no_task;
46002
46003 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46004 + goto out;
46005 +
46006 /*
46007 * Yes, it does not scale. And it should not. Don't add
46008 * new entries into /proc/<tgid>/ without very good reasons.
46009 @@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
46010 if (!task)
46011 goto out_no_task;
46012
46013 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46014 + goto out;
46015 +
46016 ret = 0;
46017 i = filp->f_pos;
46018 switch (i) {
46019 @@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
46020 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
46021 void *cookie)
46022 {
46023 - char *s = nd_get_link(nd);
46024 + const char *s = nd_get_link(nd);
46025 if (!IS_ERR(s))
46026 __putname(s);
46027 }
46028 @@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
46029 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
46030 #endif
46031 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46032 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46033 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46034 INF("syscall", S_IRUGO, proc_pid_syscall),
46035 #endif
46036 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46037 @@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
46038 #ifdef CONFIG_SECURITY
46039 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46040 #endif
46041 -#ifdef CONFIG_KALLSYMS
46042 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46043 INF("wchan", S_IRUGO, proc_pid_wchan),
46044 #endif
46045 -#ifdef CONFIG_STACKTRACE
46046 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46047 ONE("stack", S_IRUGO, proc_pid_stack),
46048 #endif
46049 #ifdef CONFIG_SCHEDSTATS
46050 @@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
46051 #ifdef CONFIG_HARDWALL
46052 INF("hardwall", S_IRUGO, proc_pid_hardwall),
46053 #endif
46054 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46055 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
46056 +#endif
46057 };
46058
46059 static int proc_tgid_base_readdir(struct file * filp,
46060 @@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
46061 if (!inode)
46062 goto out;
46063
46064 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46065 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
46066 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46067 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46068 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
46069 +#else
46070 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
46071 +#endif
46072 inode->i_op = &proc_tgid_base_inode_operations;
46073 inode->i_fop = &proc_tgid_base_operations;
46074 inode->i_flags|=S_IMMUTABLE;
46075 @@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
46076 if (!task)
46077 goto out;
46078
46079 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46080 + goto out_put_task;
46081 +
46082 result = proc_pid_instantiate(dir, dentry, task, NULL);
46083 +out_put_task:
46084 put_task_struct(task);
46085 out:
46086 return result;
46087 @@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46088 {
46089 unsigned int nr;
46090 struct task_struct *reaper;
46091 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46092 + const struct cred *tmpcred = current_cred();
46093 + const struct cred *itercred;
46094 +#endif
46095 + filldir_t __filldir = filldir;
46096 struct tgid_iter iter;
46097 struct pid_namespace *ns;
46098
46099 @@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46100 for (iter = next_tgid(ns, iter);
46101 iter.task;
46102 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46103 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46104 + rcu_read_lock();
46105 + itercred = __task_cred(iter.task);
46106 +#endif
46107 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46108 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46109 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46110 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46111 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46112 +#endif
46113 + )
46114 +#endif
46115 + )
46116 + __filldir = &gr_fake_filldir;
46117 + else
46118 + __filldir = filldir;
46119 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46120 + rcu_read_unlock();
46121 +#endif
46122 filp->f_pos = iter.tgid + TGID_OFFSET;
46123 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46124 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46125 put_task_struct(iter.task);
46126 goto out;
46127 }
46128 @@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
46129 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46130 #endif
46131 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46132 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46133 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46134 INF("syscall", S_IRUGO, proc_pid_syscall),
46135 #endif
46136 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46137 @@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
46138 #ifdef CONFIG_SECURITY
46139 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46140 #endif
46141 -#ifdef CONFIG_KALLSYMS
46142 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46143 INF("wchan", S_IRUGO, proc_pid_wchan),
46144 #endif
46145 -#ifdef CONFIG_STACKTRACE
46146 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46147 ONE("stack", S_IRUGO, proc_pid_stack),
46148 #endif
46149 #ifdef CONFIG_SCHEDSTATS
46150 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46151 index 82676e3..5f8518a 100644
46152 --- a/fs/proc/cmdline.c
46153 +++ b/fs/proc/cmdline.c
46154 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46155
46156 static int __init proc_cmdline_init(void)
46157 {
46158 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46159 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46160 +#else
46161 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46162 +#endif
46163 return 0;
46164 }
46165 module_init(proc_cmdline_init);
46166 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46167 index b143471..bb105e5 100644
46168 --- a/fs/proc/devices.c
46169 +++ b/fs/proc/devices.c
46170 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46171
46172 static int __init proc_devices_init(void)
46173 {
46174 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46175 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46176 +#else
46177 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46178 +#endif
46179 return 0;
46180 }
46181 module_init(proc_devices_init);
46182 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46183 index 7737c54..7172574 100644
46184 --- a/fs/proc/inode.c
46185 +++ b/fs/proc/inode.c
46186 @@ -18,12 +18,18 @@
46187 #include <linux/module.h>
46188 #include <linux/sysctl.h>
46189 #include <linux/slab.h>
46190 +#include <linux/grsecurity.h>
46191
46192 #include <asm/system.h>
46193 #include <asm/uaccess.h>
46194
46195 #include "internal.h"
46196
46197 +#ifdef CONFIG_PROC_SYSCTL
46198 +extern const struct inode_operations proc_sys_inode_operations;
46199 +extern const struct inode_operations proc_sys_dir_operations;
46200 +#endif
46201 +
46202 static void proc_evict_inode(struct inode *inode)
46203 {
46204 struct proc_dir_entry *de;
46205 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46206 ns_ops = PROC_I(inode)->ns_ops;
46207 if (ns_ops && ns_ops->put)
46208 ns_ops->put(PROC_I(inode)->ns);
46209 +
46210 +#ifdef CONFIG_PROC_SYSCTL
46211 + if (inode->i_op == &proc_sys_inode_operations ||
46212 + inode->i_op == &proc_sys_dir_operations)
46213 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46214 +#endif
46215 +
46216 }
46217
46218 static struct kmem_cache * proc_inode_cachep;
46219 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46220 if (de->mode) {
46221 inode->i_mode = de->mode;
46222 inode->i_uid = de->uid;
46223 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46224 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46225 +#else
46226 inode->i_gid = de->gid;
46227 +#endif
46228 }
46229 if (de->size)
46230 inode->i_size = de->size;
46231 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46232 index 7838e5c..ff92cbc 100644
46233 --- a/fs/proc/internal.h
46234 +++ b/fs/proc/internal.h
46235 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46236 struct pid *pid, struct task_struct *task);
46237 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46238 struct pid *pid, struct task_struct *task);
46239 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46240 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46241 +#endif
46242 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46243
46244 extern const struct file_operations proc_maps_operations;
46245 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46246 index d245cb2..f4e8498 100644
46247 --- a/fs/proc/kcore.c
46248 +++ b/fs/proc/kcore.c
46249 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46250 * the addresses in the elf_phdr on our list.
46251 */
46252 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46253 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46254 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46255 + if (tsz > buflen)
46256 tsz = buflen;
46257 -
46258 +
46259 while (buflen) {
46260 struct kcore_list *m;
46261
46262 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46263 kfree(elf_buf);
46264 } else {
46265 if (kern_addr_valid(start)) {
46266 - unsigned long n;
46267 + char *elf_buf;
46268 + mm_segment_t oldfs;
46269
46270 - n = copy_to_user(buffer, (char *)start, tsz);
46271 - /*
46272 - * We cannot distingush between fault on source
46273 - * and fault on destination. When this happens
46274 - * we clear too and hope it will trigger the
46275 - * EFAULT again.
46276 - */
46277 - if (n) {
46278 - if (clear_user(buffer + tsz - n,
46279 - n))
46280 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46281 + if (!elf_buf)
46282 + return -ENOMEM;
46283 + oldfs = get_fs();
46284 + set_fs(KERNEL_DS);
46285 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46286 + set_fs(oldfs);
46287 + if (copy_to_user(buffer, elf_buf, tsz)) {
46288 + kfree(elf_buf);
46289 return -EFAULT;
46290 + }
46291 }
46292 + set_fs(oldfs);
46293 + kfree(elf_buf);
46294 } else {
46295 if (clear_user(buffer, tsz))
46296 return -EFAULT;
46297 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46298
46299 static int open_kcore(struct inode *inode, struct file *filp)
46300 {
46301 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46302 + return -EPERM;
46303 +#endif
46304 if (!capable(CAP_SYS_RAWIO))
46305 return -EPERM;
46306 if (kcore_need_update)
46307 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46308 index 80e4645..53e5fcf 100644
46309 --- a/fs/proc/meminfo.c
46310 +++ b/fs/proc/meminfo.c
46311 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46312 vmi.used >> 10,
46313 vmi.largest_chunk >> 10
46314 #ifdef CONFIG_MEMORY_FAILURE
46315 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46316 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46317 #endif
46318 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46319 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46320 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46321 index b1822dd..df622cb 100644
46322 --- a/fs/proc/nommu.c
46323 +++ b/fs/proc/nommu.c
46324 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46325 if (len < 1)
46326 len = 1;
46327 seq_printf(m, "%*c", len, ' ');
46328 - seq_path(m, &file->f_path, "");
46329 + seq_path(m, &file->f_path, "\n\\");
46330 }
46331
46332 seq_putc(m, '\n');
46333 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46334 index f738024..876984a 100644
46335 --- a/fs/proc/proc_net.c
46336 +++ b/fs/proc/proc_net.c
46337 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46338 struct task_struct *task;
46339 struct nsproxy *ns;
46340 struct net *net = NULL;
46341 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46342 + const struct cred *cred = current_cred();
46343 +#endif
46344 +
46345 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46346 + if (cred->fsuid)
46347 + return net;
46348 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46349 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46350 + return net;
46351 +#endif
46352
46353 rcu_read_lock();
46354 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46355 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46356 index a6b6217..1e0579d 100644
46357 --- a/fs/proc/proc_sysctl.c
46358 +++ b/fs/proc/proc_sysctl.c
46359 @@ -9,11 +9,13 @@
46360 #include <linux/namei.h>
46361 #include "internal.h"
46362
46363 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46364 +
46365 static const struct dentry_operations proc_sys_dentry_operations;
46366 static const struct file_operations proc_sys_file_operations;
46367 -static const struct inode_operations proc_sys_inode_operations;
46368 +const struct inode_operations proc_sys_inode_operations;
46369 static const struct file_operations proc_sys_dir_file_operations;
46370 -static const struct inode_operations proc_sys_dir_operations;
46371 +const struct inode_operations proc_sys_dir_operations;
46372
46373 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46374 {
46375 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46376
46377 err = NULL;
46378 d_set_d_op(dentry, &proc_sys_dentry_operations);
46379 +
46380 + gr_handle_proc_create(dentry, inode);
46381 +
46382 d_add(dentry, inode);
46383
46384 + if (gr_handle_sysctl(p, MAY_EXEC))
46385 + err = ERR_PTR(-ENOENT);
46386 +
46387 out:
46388 sysctl_head_finish(head);
46389 return err;
46390 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46391 if (!table->proc_handler)
46392 goto out;
46393
46394 +#ifdef CONFIG_GRKERNSEC
46395 + error = -EPERM;
46396 + if (write && !capable(CAP_SYS_ADMIN))
46397 + goto out;
46398 +#endif
46399 +
46400 /* careful: calling conventions are nasty here */
46401 res = count;
46402 error = table->proc_handler(table, write, buf, &res, ppos);
46403 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46404 return -ENOMEM;
46405 } else {
46406 d_set_d_op(child, &proc_sys_dentry_operations);
46407 +
46408 + gr_handle_proc_create(child, inode);
46409 +
46410 d_add(child, inode);
46411 }
46412 } else {
46413 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46414 if (*pos < file->f_pos)
46415 continue;
46416
46417 + if (gr_handle_sysctl(table, 0))
46418 + continue;
46419 +
46420 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46421 if (res)
46422 return res;
46423 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46424 if (IS_ERR(head))
46425 return PTR_ERR(head);
46426
46427 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46428 + return -ENOENT;
46429 +
46430 generic_fillattr(inode, stat);
46431 if (table)
46432 stat->mode = (stat->mode & S_IFMT) | table->mode;
46433 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46434 .llseek = generic_file_llseek,
46435 };
46436
46437 -static const struct inode_operations proc_sys_inode_operations = {
46438 +const struct inode_operations proc_sys_inode_operations = {
46439 .permission = proc_sys_permission,
46440 .setattr = proc_sys_setattr,
46441 .getattr = proc_sys_getattr,
46442 };
46443
46444 -static const struct inode_operations proc_sys_dir_operations = {
46445 +const struct inode_operations proc_sys_dir_operations = {
46446 .lookup = proc_sys_lookup,
46447 .permission = proc_sys_permission,
46448 .setattr = proc_sys_setattr,
46449 diff --git a/fs/proc/root.c b/fs/proc/root.c
46450 index 03102d9..4ae347e 100644
46451 --- a/fs/proc/root.c
46452 +++ b/fs/proc/root.c
46453 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46454 #ifdef CONFIG_PROC_DEVICETREE
46455 proc_device_tree_init();
46456 #endif
46457 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46458 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46459 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46460 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46461 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46462 +#endif
46463 +#else
46464 proc_mkdir("bus", NULL);
46465 +#endif
46466 proc_sys_init();
46467 }
46468
46469 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46470 index 7dcd2a2..b2f410e 100644
46471 --- a/fs/proc/task_mmu.c
46472 +++ b/fs/proc/task_mmu.c
46473 @@ -11,6 +11,7 @@
46474 #include <linux/rmap.h>
46475 #include <linux/swap.h>
46476 #include <linux/swapops.h>
46477 +#include <linux/grsecurity.h>
46478
46479 #include <asm/elf.h>
46480 #include <asm/uaccess.h>
46481 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46482 "VmExe:\t%8lu kB\n"
46483 "VmLib:\t%8lu kB\n"
46484 "VmPTE:\t%8lu kB\n"
46485 - "VmSwap:\t%8lu kB\n",
46486 - hiwater_vm << (PAGE_SHIFT-10),
46487 + "VmSwap:\t%8lu kB\n"
46488 +
46489 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46490 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46491 +#endif
46492 +
46493 + ,hiwater_vm << (PAGE_SHIFT-10),
46494 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46495 mm->locked_vm << (PAGE_SHIFT-10),
46496 mm->pinned_vm << (PAGE_SHIFT-10),
46497 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46498 data << (PAGE_SHIFT-10),
46499 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46500 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46501 - swap << (PAGE_SHIFT-10));
46502 + swap << (PAGE_SHIFT-10)
46503 +
46504 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46505 + , mm->context.user_cs_base, mm->context.user_cs_limit
46506 +#endif
46507 +
46508 + );
46509 }
46510
46511 unsigned long task_vsize(struct mm_struct *mm)
46512 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46513 return ret;
46514 }
46515
46516 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46517 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46518 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46519 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46520 +#endif
46521 +
46522 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46523 {
46524 struct mm_struct *mm = vma->vm_mm;
46525 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46526 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46527 }
46528
46529 - /* We don't show the stack guard page in /proc/maps */
46530 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46531 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46532 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46533 +#else
46534 start = vma->vm_start;
46535 - if (stack_guard_page_start(vma, start))
46536 - start += PAGE_SIZE;
46537 end = vma->vm_end;
46538 - if (stack_guard_page_end(vma, end))
46539 - end -= PAGE_SIZE;
46540 +#endif
46541
46542 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46543 start,
46544 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46545 flags & VM_WRITE ? 'w' : '-',
46546 flags & VM_EXEC ? 'x' : '-',
46547 flags & VM_MAYSHARE ? 's' : 'p',
46548 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46549 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46550 +#else
46551 pgoff,
46552 +#endif
46553 MAJOR(dev), MINOR(dev), ino, &len);
46554
46555 /*
46556 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46557 */
46558 if (file) {
46559 pad_len_spaces(m, len);
46560 - seq_path(m, &file->f_path, "\n");
46561 + seq_path(m, &file->f_path, "\n\\");
46562 } else {
46563 const char *name = arch_vma_name(vma);
46564 if (!name) {
46565 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46566 if (vma->vm_start <= mm->brk &&
46567 vma->vm_end >= mm->start_brk) {
46568 name = "[heap]";
46569 - } else if (vma->vm_start <= mm->start_stack &&
46570 - vma->vm_end >= mm->start_stack) {
46571 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46572 + (vma->vm_start <= mm->start_stack &&
46573 + vma->vm_end >= mm->start_stack)) {
46574 name = "[stack]";
46575 }
46576 } else {
46577 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46578 struct proc_maps_private *priv = m->private;
46579 struct task_struct *task = priv->task;
46580
46581 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46582 + if (current->exec_id != m->exec_id) {
46583 + gr_log_badprocpid("maps");
46584 + return 0;
46585 + }
46586 +#endif
46587 +
46588 show_map_vma(m, vma);
46589
46590 if (m->count < m->size) /* vma is copied successfully */
46591 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46592 .private = &mss,
46593 };
46594
46595 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46596 + if (current->exec_id != m->exec_id) {
46597 + gr_log_badprocpid("smaps");
46598 + return 0;
46599 + }
46600 +#endif
46601 memset(&mss, 0, sizeof mss);
46602 - mss.vma = vma;
46603 - /* mmap_sem is held in m_start */
46604 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46605 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46606 -
46607 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46608 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46609 +#endif
46610 + mss.vma = vma;
46611 + /* mmap_sem is held in m_start */
46612 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46613 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46614 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46615 + }
46616 +#endif
46617 show_map_vma(m, vma);
46618
46619 seq_printf(m,
46620 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46621 "KernelPageSize: %8lu kB\n"
46622 "MMUPageSize: %8lu kB\n"
46623 "Locked: %8lu kB\n",
46624 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46625 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46626 +#else
46627 (vma->vm_end - vma->vm_start) >> 10,
46628 +#endif
46629 mss.resident >> 10,
46630 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46631 mss.shared_clean >> 10,
46632 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46633 int n;
46634 char buffer[50];
46635
46636 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46637 + if (current->exec_id != m->exec_id) {
46638 + gr_log_badprocpid("numa_maps");
46639 + return 0;
46640 + }
46641 +#endif
46642 +
46643 if (!mm)
46644 return 0;
46645
46646 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46647 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46648 mpol_cond_put(pol);
46649
46650 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46651 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46652 +#else
46653 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46654 +#endif
46655
46656 if (file) {
46657 seq_printf(m, " file=");
46658 - seq_path(m, &file->f_path, "\n\t= ");
46659 + seq_path(m, &file->f_path, "\n\t\\= ");
46660 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46661 seq_printf(m, " heap");
46662 } else if (vma->vm_start <= mm->start_stack &&
46663 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46664 index 980de54..2a4db5f 100644
46665 --- a/fs/proc/task_nommu.c
46666 +++ b/fs/proc/task_nommu.c
46667 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46668 else
46669 bytes += kobjsize(mm);
46670
46671 - if (current->fs && current->fs->users > 1)
46672 + if (current->fs && atomic_read(&current->fs->users) > 1)
46673 sbytes += kobjsize(current->fs);
46674 else
46675 bytes += kobjsize(current->fs);
46676 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46677
46678 if (file) {
46679 pad_len_spaces(m, len);
46680 - seq_path(m, &file->f_path, "");
46681 + seq_path(m, &file->f_path, "\n\\");
46682 } else if (mm) {
46683 if (vma->vm_start <= mm->start_stack &&
46684 vma->vm_end >= mm->start_stack) {
46685 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46686 index d67908b..d13f6a6 100644
46687 --- a/fs/quota/netlink.c
46688 +++ b/fs/quota/netlink.c
46689 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46690 void quota_send_warning(short type, unsigned int id, dev_t dev,
46691 const char warntype)
46692 {
46693 - static atomic_t seq;
46694 + static atomic_unchecked_t seq;
46695 struct sk_buff *skb;
46696 void *msg_head;
46697 int ret;
46698 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46699 "VFS: Not enough memory to send quota warning.\n");
46700 return;
46701 }
46702 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46703 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46704 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46705 if (!msg_head) {
46706 printk(KERN_ERR
46707 diff --git a/fs/readdir.c b/fs/readdir.c
46708 index 356f715..c918d38 100644
46709 --- a/fs/readdir.c
46710 +++ b/fs/readdir.c
46711 @@ -17,6 +17,7 @@
46712 #include <linux/security.h>
46713 #include <linux/syscalls.h>
46714 #include <linux/unistd.h>
46715 +#include <linux/namei.h>
46716
46717 #include <asm/uaccess.h>
46718
46719 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46720
46721 struct readdir_callback {
46722 struct old_linux_dirent __user * dirent;
46723 + struct file * file;
46724 int result;
46725 };
46726
46727 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46728 buf->result = -EOVERFLOW;
46729 return -EOVERFLOW;
46730 }
46731 +
46732 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46733 + return 0;
46734 +
46735 buf->result++;
46736 dirent = buf->dirent;
46737 if (!access_ok(VERIFY_WRITE, dirent,
46738 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46739
46740 buf.result = 0;
46741 buf.dirent = dirent;
46742 + buf.file = file;
46743
46744 error = vfs_readdir(file, fillonedir, &buf);
46745 if (buf.result)
46746 @@ -142,6 +149,7 @@ struct linux_dirent {
46747 struct getdents_callback {
46748 struct linux_dirent __user * current_dir;
46749 struct linux_dirent __user * previous;
46750 + struct file * file;
46751 int count;
46752 int error;
46753 };
46754 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46755 buf->error = -EOVERFLOW;
46756 return -EOVERFLOW;
46757 }
46758 +
46759 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46760 + return 0;
46761 +
46762 dirent = buf->previous;
46763 if (dirent) {
46764 if (__put_user(offset, &dirent->d_off))
46765 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46766 buf.previous = NULL;
46767 buf.count = count;
46768 buf.error = 0;
46769 + buf.file = file;
46770
46771 error = vfs_readdir(file, filldir, &buf);
46772 if (error >= 0)
46773 @@ -229,6 +242,7 @@ out:
46774 struct getdents_callback64 {
46775 struct linux_dirent64 __user * current_dir;
46776 struct linux_dirent64 __user * previous;
46777 + struct file *file;
46778 int count;
46779 int error;
46780 };
46781 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46782 buf->error = -EINVAL; /* only used if we fail.. */
46783 if (reclen > buf->count)
46784 return -EINVAL;
46785 +
46786 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46787 + return 0;
46788 +
46789 dirent = buf->previous;
46790 if (dirent) {
46791 if (__put_user(offset, &dirent->d_off))
46792 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46793
46794 buf.current_dir = dirent;
46795 buf.previous = NULL;
46796 + buf.file = file;
46797 buf.count = count;
46798 buf.error = 0;
46799
46800 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46801 error = buf.error;
46802 lastdirent = buf.previous;
46803 if (lastdirent) {
46804 - typeof(lastdirent->d_off) d_off = file->f_pos;
46805 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46806 if (__put_user(d_off, &lastdirent->d_off))
46807 error = -EFAULT;
46808 else
46809 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46810 index 60c0804..d814f98 100644
46811 --- a/fs/reiserfs/do_balan.c
46812 +++ b/fs/reiserfs/do_balan.c
46813 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46814 return;
46815 }
46816
46817 - atomic_inc(&(fs_generation(tb->tb_sb)));
46818 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46819 do_balance_starts(tb);
46820
46821 /* balance leaf returns 0 except if combining L R and S into
46822 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46823 index 7a99811..a7c96c4 100644
46824 --- a/fs/reiserfs/procfs.c
46825 +++ b/fs/reiserfs/procfs.c
46826 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46827 "SMALL_TAILS " : "NO_TAILS ",
46828 replay_only(sb) ? "REPLAY_ONLY " : "",
46829 convert_reiserfs(sb) ? "CONV " : "",
46830 - atomic_read(&r->s_generation_counter),
46831 + atomic_read_unchecked(&r->s_generation_counter),
46832 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46833 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46834 SF(s_good_search_by_key_reada), SF(s_bmaps),
46835 diff --git a/fs/select.c b/fs/select.c
46836 index d33418f..2a5345e 100644
46837 --- a/fs/select.c
46838 +++ b/fs/select.c
46839 @@ -20,6 +20,7 @@
46840 #include <linux/module.h>
46841 #include <linux/slab.h>
46842 #include <linux/poll.h>
46843 +#include <linux/security.h>
46844 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46845 #include <linux/file.h>
46846 #include <linux/fdtable.h>
46847 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46848 struct poll_list *walk = head;
46849 unsigned long todo = nfds;
46850
46851 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46852 if (nfds > rlimit(RLIMIT_NOFILE))
46853 return -EINVAL;
46854
46855 diff --git a/fs/seq_file.c b/fs/seq_file.c
46856 index dba43c3..9fb8511 100644
46857 --- a/fs/seq_file.c
46858 +++ b/fs/seq_file.c
46859 @@ -9,6 +9,7 @@
46860 #include <linux/module.h>
46861 #include <linux/seq_file.h>
46862 #include <linux/slab.h>
46863 +#include <linux/sched.h>
46864
46865 #include <asm/uaccess.h>
46866 #include <asm/page.h>
46867 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46868 memset(p, 0, sizeof(*p));
46869 mutex_init(&p->lock);
46870 p->op = op;
46871 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46872 + p->exec_id = current->exec_id;
46873 +#endif
46874
46875 /*
46876 * Wrappers around seq_open(e.g. swaps_open) need to be
46877 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46878 return 0;
46879 }
46880 if (!m->buf) {
46881 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46882 + m->size = PAGE_SIZE;
46883 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46884 if (!m->buf)
46885 return -ENOMEM;
46886 }
46887 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46888 Eoverflow:
46889 m->op->stop(m, p);
46890 kfree(m->buf);
46891 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46892 + m->size <<= 1;
46893 + m->buf = kmalloc(m->size, GFP_KERNEL);
46894 return !m->buf ? -ENOMEM : -EAGAIN;
46895 }
46896
46897 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46898 m->version = file->f_version;
46899 /* grab buffer if we didn't have one */
46900 if (!m->buf) {
46901 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46902 + m->size = PAGE_SIZE;
46903 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46904 if (!m->buf)
46905 goto Enomem;
46906 }
46907 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46908 goto Fill;
46909 m->op->stop(m, p);
46910 kfree(m->buf);
46911 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46912 + m->size <<= 1;
46913 + m->buf = kmalloc(m->size, GFP_KERNEL);
46914 if (!m->buf)
46915 goto Enomem;
46916 m->count = 0;
46917 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46918 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46919 void *data)
46920 {
46921 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46922 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46923 int res = -ENOMEM;
46924
46925 if (op) {
46926 diff --git a/fs/splice.c b/fs/splice.c
46927 index fa2defa..8601650 100644
46928 --- a/fs/splice.c
46929 +++ b/fs/splice.c
46930 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46931 pipe_lock(pipe);
46932
46933 for (;;) {
46934 - if (!pipe->readers) {
46935 + if (!atomic_read(&pipe->readers)) {
46936 send_sig(SIGPIPE, current, 0);
46937 if (!ret)
46938 ret = -EPIPE;
46939 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46940 do_wakeup = 0;
46941 }
46942
46943 - pipe->waiting_writers++;
46944 + atomic_inc(&pipe->waiting_writers);
46945 pipe_wait(pipe);
46946 - pipe->waiting_writers--;
46947 + atomic_dec(&pipe->waiting_writers);
46948 }
46949
46950 pipe_unlock(pipe);
46951 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46952 old_fs = get_fs();
46953 set_fs(get_ds());
46954 /* The cast to a user pointer is valid due to the set_fs() */
46955 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46956 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46957 set_fs(old_fs);
46958
46959 return res;
46960 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46961 old_fs = get_fs();
46962 set_fs(get_ds());
46963 /* The cast to a user pointer is valid due to the set_fs() */
46964 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46965 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46966 set_fs(old_fs);
46967
46968 return res;
46969 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46970 goto err;
46971
46972 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46973 - vec[i].iov_base = (void __user *) page_address(page);
46974 + vec[i].iov_base = (void __force_user *) page_address(page);
46975 vec[i].iov_len = this_len;
46976 spd.pages[i] = page;
46977 spd.nr_pages++;
46978 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46979 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46980 {
46981 while (!pipe->nrbufs) {
46982 - if (!pipe->writers)
46983 + if (!atomic_read(&pipe->writers))
46984 return 0;
46985
46986 - if (!pipe->waiting_writers && sd->num_spliced)
46987 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46988 return 0;
46989
46990 if (sd->flags & SPLICE_F_NONBLOCK)
46991 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46992 * out of the pipe right after the splice_to_pipe(). So set
46993 * PIPE_READERS appropriately.
46994 */
46995 - pipe->readers = 1;
46996 + atomic_set(&pipe->readers, 1);
46997
46998 current->splice_pipe = pipe;
46999 }
47000 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47001 ret = -ERESTARTSYS;
47002 break;
47003 }
47004 - if (!pipe->writers)
47005 + if (!atomic_read(&pipe->writers))
47006 break;
47007 - if (!pipe->waiting_writers) {
47008 + if (!atomic_read(&pipe->waiting_writers)) {
47009 if (flags & SPLICE_F_NONBLOCK) {
47010 ret = -EAGAIN;
47011 break;
47012 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47013 pipe_lock(pipe);
47014
47015 while (pipe->nrbufs >= pipe->buffers) {
47016 - if (!pipe->readers) {
47017 + if (!atomic_read(&pipe->readers)) {
47018 send_sig(SIGPIPE, current, 0);
47019 ret = -EPIPE;
47020 break;
47021 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47022 ret = -ERESTARTSYS;
47023 break;
47024 }
47025 - pipe->waiting_writers++;
47026 + atomic_inc(&pipe->waiting_writers);
47027 pipe_wait(pipe);
47028 - pipe->waiting_writers--;
47029 + atomic_dec(&pipe->waiting_writers);
47030 }
47031
47032 pipe_unlock(pipe);
47033 @@ -1819,14 +1819,14 @@ retry:
47034 pipe_double_lock(ipipe, opipe);
47035
47036 do {
47037 - if (!opipe->readers) {
47038 + if (!atomic_read(&opipe->readers)) {
47039 send_sig(SIGPIPE, current, 0);
47040 if (!ret)
47041 ret = -EPIPE;
47042 break;
47043 }
47044
47045 - if (!ipipe->nrbufs && !ipipe->writers)
47046 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47047 break;
47048
47049 /*
47050 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47051 pipe_double_lock(ipipe, opipe);
47052
47053 do {
47054 - if (!opipe->readers) {
47055 + if (!atomic_read(&opipe->readers)) {
47056 send_sig(SIGPIPE, current, 0);
47057 if (!ret)
47058 ret = -EPIPE;
47059 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47060 * return EAGAIN if we have the potential of some data in the
47061 * future, otherwise just return 0
47062 */
47063 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47064 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47065 ret = -EAGAIN;
47066
47067 pipe_unlock(ipipe);
47068 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
47069 index 7fdf6a7..e6cd8ad 100644
47070 --- a/fs/sysfs/dir.c
47071 +++ b/fs/sysfs/dir.c
47072 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
47073 struct sysfs_dirent *sd;
47074 int rc;
47075
47076 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47077 + const char *parent_name = parent_sd->s_name;
47078 +
47079 + mode = S_IFDIR | S_IRWXU;
47080 +
47081 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
47082 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
47083 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
47084 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
47085 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
47086 +#endif
47087 +
47088 /* allocate */
47089 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
47090 if (!sd)
47091 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
47092 index 779789a..f58193c 100644
47093 --- a/fs/sysfs/file.c
47094 +++ b/fs/sysfs/file.c
47095 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
47096
47097 struct sysfs_open_dirent {
47098 atomic_t refcnt;
47099 - atomic_t event;
47100 + atomic_unchecked_t event;
47101 wait_queue_head_t poll;
47102 struct list_head buffers; /* goes through sysfs_buffer.list */
47103 };
47104 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
47105 if (!sysfs_get_active(attr_sd))
47106 return -ENODEV;
47107
47108 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47109 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47110 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47111
47112 sysfs_put_active(attr_sd);
47113 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
47114 return -ENOMEM;
47115
47116 atomic_set(&new_od->refcnt, 0);
47117 - atomic_set(&new_od->event, 1);
47118 + atomic_set_unchecked(&new_od->event, 1);
47119 init_waitqueue_head(&new_od->poll);
47120 INIT_LIST_HEAD(&new_od->buffers);
47121 goto retry;
47122 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
47123
47124 sysfs_put_active(attr_sd);
47125
47126 - if (buffer->event != atomic_read(&od->event))
47127 + if (buffer->event != atomic_read_unchecked(&od->event))
47128 goto trigger;
47129
47130 return DEFAULT_POLLMASK;
47131 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
47132
47133 od = sd->s_attr.open;
47134 if (od) {
47135 - atomic_inc(&od->event);
47136 + atomic_inc_unchecked(&od->event);
47137 wake_up_interruptible(&od->poll);
47138 }
47139
47140 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
47141 index a7ac78f..02158e1 100644
47142 --- a/fs/sysfs/symlink.c
47143 +++ b/fs/sysfs/symlink.c
47144 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47145
47146 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47147 {
47148 - char *page = nd_get_link(nd);
47149 + const char *page = nd_get_link(nd);
47150 if (!IS_ERR(page))
47151 free_page((unsigned long)page);
47152 }
47153 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
47154 index c175b4d..8f36a16 100644
47155 --- a/fs/udf/misc.c
47156 +++ b/fs/udf/misc.c
47157 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
47158
47159 u8 udf_tag_checksum(const struct tag *t)
47160 {
47161 - u8 *data = (u8 *)t;
47162 + const u8 *data = (const u8 *)t;
47163 u8 checksum = 0;
47164 int i;
47165 for (i = 0; i < sizeof(struct tag); ++i)
47166 diff --git a/fs/utimes.c b/fs/utimes.c
47167 index ba653f3..06ea4b1 100644
47168 --- a/fs/utimes.c
47169 +++ b/fs/utimes.c
47170 @@ -1,6 +1,7 @@
47171 #include <linux/compiler.h>
47172 #include <linux/file.h>
47173 #include <linux/fs.h>
47174 +#include <linux/security.h>
47175 #include <linux/linkage.h>
47176 #include <linux/mount.h>
47177 #include <linux/namei.h>
47178 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47179 goto mnt_drop_write_and_out;
47180 }
47181 }
47182 +
47183 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47184 + error = -EACCES;
47185 + goto mnt_drop_write_and_out;
47186 + }
47187 +
47188 mutex_lock(&inode->i_mutex);
47189 error = notify_change(path->dentry, &newattrs);
47190 mutex_unlock(&inode->i_mutex);
47191 diff --git a/fs/xattr.c b/fs/xattr.c
47192 index 67583de..c5aad14 100644
47193 --- a/fs/xattr.c
47194 +++ b/fs/xattr.c
47195 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47196 * Extended attribute SET operations
47197 */
47198 static long
47199 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47200 +setxattr(struct path *path, const char __user *name, const void __user *value,
47201 size_t size, int flags)
47202 {
47203 int error;
47204 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47205 return PTR_ERR(kvalue);
47206 }
47207
47208 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47209 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47210 + error = -EACCES;
47211 + goto out;
47212 + }
47213 +
47214 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47215 +out:
47216 kfree(kvalue);
47217 return error;
47218 }
47219 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47220 return error;
47221 error = mnt_want_write(path.mnt);
47222 if (!error) {
47223 - error = setxattr(path.dentry, name, value, size, flags);
47224 + error = setxattr(&path, name, value, size, flags);
47225 mnt_drop_write(path.mnt);
47226 }
47227 path_put(&path);
47228 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47229 return error;
47230 error = mnt_want_write(path.mnt);
47231 if (!error) {
47232 - error = setxattr(path.dentry, name, value, size, flags);
47233 + error = setxattr(&path, name, value, size, flags);
47234 mnt_drop_write(path.mnt);
47235 }
47236 path_put(&path);
47237 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47238 const void __user *,value, size_t, size, int, flags)
47239 {
47240 struct file *f;
47241 - struct dentry *dentry;
47242 int error = -EBADF;
47243
47244 f = fget(fd);
47245 if (!f)
47246 return error;
47247 - dentry = f->f_path.dentry;
47248 - audit_inode(NULL, dentry);
47249 + audit_inode(NULL, f->f_path.dentry);
47250 error = mnt_want_write_file(f);
47251 if (!error) {
47252 - error = setxattr(dentry, name, value, size, flags);
47253 + error = setxattr(&f->f_path, name, value, size, flags);
47254 mnt_drop_write(f->f_path.mnt);
47255 }
47256 fput(f);
47257 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47258 index 8d5a506..7f62712 100644
47259 --- a/fs/xattr_acl.c
47260 +++ b/fs/xattr_acl.c
47261 @@ -17,8 +17,8 @@
47262 struct posix_acl *
47263 posix_acl_from_xattr(const void *value, size_t size)
47264 {
47265 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47266 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47267 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47268 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47269 int count;
47270 struct posix_acl *acl;
47271 struct posix_acl_entry *acl_e;
47272 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47273 index d0ab788..827999b 100644
47274 --- a/fs/xfs/xfs_bmap.c
47275 +++ b/fs/xfs/xfs_bmap.c
47276 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47277 int nmap,
47278 int ret_nmap);
47279 #else
47280 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47281 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47282 #endif /* DEBUG */
47283
47284 STATIC int
47285 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47286 index 79d05e8..e3e5861 100644
47287 --- a/fs/xfs/xfs_dir2_sf.c
47288 +++ b/fs/xfs/xfs_dir2_sf.c
47289 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47290 }
47291
47292 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47293 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47294 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47295 + char name[sfep->namelen];
47296 + memcpy(name, sfep->name, sfep->namelen);
47297 + if (filldir(dirent, name, sfep->namelen,
47298 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47299 + *offset = off & 0x7fffffff;
47300 + return 0;
47301 + }
47302 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47303 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47304 *offset = off & 0x7fffffff;
47305 return 0;
47306 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47307 index d99a905..9f88202 100644
47308 --- a/fs/xfs/xfs_ioctl.c
47309 +++ b/fs/xfs/xfs_ioctl.c
47310 @@ -128,7 +128,7 @@ xfs_find_handle(
47311 }
47312
47313 error = -EFAULT;
47314 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47315 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47316 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47317 goto out_put;
47318
47319 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47320 index 23ce927..e274cc1 100644
47321 --- a/fs/xfs/xfs_iops.c
47322 +++ b/fs/xfs/xfs_iops.c
47323 @@ -447,7 +447,7 @@ xfs_vn_put_link(
47324 struct nameidata *nd,
47325 void *p)
47326 {
47327 - char *s = nd_get_link(nd);
47328 + const char *s = nd_get_link(nd);
47329
47330 if (!IS_ERR(s))
47331 kfree(s);
47332 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47333 new file mode 100644
47334 index 0000000..41df561
47335 --- /dev/null
47336 +++ b/grsecurity/Kconfig
47337 @@ -0,0 +1,1075 @@
47338 +#
47339 +# grecurity configuration
47340 +#
47341 +
47342 +menu "Grsecurity"
47343 +
47344 +config GRKERNSEC
47345 + bool "Grsecurity"
47346 + select CRYPTO
47347 + select CRYPTO_SHA256
47348 + help
47349 + If you say Y here, you will be able to configure many features
47350 + that will enhance the security of your system. It is highly
47351 + recommended that you say Y here and read through the help
47352 + for each option so that you fully understand the features and
47353 + can evaluate their usefulness for your machine.
47354 +
47355 +choice
47356 + prompt "Security Level"
47357 + depends on GRKERNSEC
47358 + default GRKERNSEC_CUSTOM
47359 +
47360 +config GRKERNSEC_LOW
47361 + bool "Low"
47362 + select GRKERNSEC_LINK
47363 + select GRKERNSEC_FIFO
47364 + select GRKERNSEC_RANDNET
47365 + select GRKERNSEC_DMESG
47366 + select GRKERNSEC_CHROOT
47367 + select GRKERNSEC_CHROOT_CHDIR
47368 +
47369 + help
47370 + If you choose this option, several of the grsecurity options will
47371 + be enabled that will give you greater protection against a number
47372 + of attacks, while assuring that none of your software will have any
47373 + conflicts with the additional security measures. If you run a lot
47374 + of unusual software, or you are having problems with the higher
47375 + security levels, you should say Y here. With this option, the
47376 + following features are enabled:
47377 +
47378 + - Linking restrictions
47379 + - FIFO restrictions
47380 + - Restricted dmesg
47381 + - Enforced chdir("/") on chroot
47382 + - Runtime module disabling
47383 +
47384 +config GRKERNSEC_MEDIUM
47385 + bool "Medium"
47386 + select PAX
47387 + select PAX_EI_PAX
47388 + select PAX_PT_PAX_FLAGS
47389 + select PAX_HAVE_ACL_FLAGS
47390 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47391 + select GRKERNSEC_CHROOT
47392 + select GRKERNSEC_CHROOT_SYSCTL
47393 + select GRKERNSEC_LINK
47394 + select GRKERNSEC_FIFO
47395 + select GRKERNSEC_DMESG
47396 + select GRKERNSEC_RANDNET
47397 + select GRKERNSEC_FORKFAIL
47398 + select GRKERNSEC_TIME
47399 + select GRKERNSEC_SIGNAL
47400 + select GRKERNSEC_CHROOT
47401 + select GRKERNSEC_CHROOT_UNIX
47402 + select GRKERNSEC_CHROOT_MOUNT
47403 + select GRKERNSEC_CHROOT_PIVOT
47404 + select GRKERNSEC_CHROOT_DOUBLE
47405 + select GRKERNSEC_CHROOT_CHDIR
47406 + select GRKERNSEC_CHROOT_MKNOD
47407 + select GRKERNSEC_PROC
47408 + select GRKERNSEC_PROC_USERGROUP
47409 + select PAX_RANDUSTACK
47410 + select PAX_ASLR
47411 + select PAX_RANDMMAP
47412 + select PAX_REFCOUNT if (X86 || SPARC64)
47413 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47414 +
47415 + help
47416 + If you say Y here, several features in addition to those included
47417 + in the low additional security level will be enabled. These
47418 + features provide even more security to your system, though in rare
47419 + cases they may be incompatible with very old or poorly written
47420 + software. If you enable this option, make sure that your auth
47421 + service (identd) is running as gid 1001. With this option,
47422 + the following features (in addition to those provided in the
47423 + low additional security level) will be enabled:
47424 +
47425 + - Failed fork logging
47426 + - Time change logging
47427 + - Signal logging
47428 + - Deny mounts in chroot
47429 + - Deny double chrooting
47430 + - Deny sysctl writes in chroot
47431 + - Deny mknod in chroot
47432 + - Deny access to abstract AF_UNIX sockets out of chroot
47433 + - Deny pivot_root in chroot
47434 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47435 + - /proc restrictions with special GID set to 10 (usually wheel)
47436 + - Address Space Layout Randomization (ASLR)
47437 + - Prevent exploitation of most refcount overflows
47438 + - Bounds checking of copying between the kernel and userland
47439 +
47440 +config GRKERNSEC_HIGH
47441 + bool "High"
47442 + select GRKERNSEC_LINK
47443 + select GRKERNSEC_FIFO
47444 + select GRKERNSEC_DMESG
47445 + select GRKERNSEC_FORKFAIL
47446 + select GRKERNSEC_TIME
47447 + select GRKERNSEC_SIGNAL
47448 + select GRKERNSEC_CHROOT
47449 + select GRKERNSEC_CHROOT_SHMAT
47450 + select GRKERNSEC_CHROOT_UNIX
47451 + select GRKERNSEC_CHROOT_MOUNT
47452 + select GRKERNSEC_CHROOT_FCHDIR
47453 + select GRKERNSEC_CHROOT_PIVOT
47454 + select GRKERNSEC_CHROOT_DOUBLE
47455 + select GRKERNSEC_CHROOT_CHDIR
47456 + select GRKERNSEC_CHROOT_MKNOD
47457 + select GRKERNSEC_CHROOT_CAPS
47458 + select GRKERNSEC_CHROOT_SYSCTL
47459 + select GRKERNSEC_CHROOT_FINDTASK
47460 + select GRKERNSEC_SYSFS_RESTRICT
47461 + select GRKERNSEC_PROC
47462 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47463 + select GRKERNSEC_HIDESYM
47464 + select GRKERNSEC_BRUTE
47465 + select GRKERNSEC_PROC_USERGROUP
47466 + select GRKERNSEC_KMEM
47467 + select GRKERNSEC_RESLOG
47468 + select GRKERNSEC_RANDNET
47469 + select GRKERNSEC_PROC_ADD
47470 + select GRKERNSEC_CHROOT_CHMOD
47471 + select GRKERNSEC_CHROOT_NICE
47472 + select GRKERNSEC_SETXID
47473 + select GRKERNSEC_AUDIT_MOUNT
47474 + select GRKERNSEC_MODHARDEN if (MODULES)
47475 + select GRKERNSEC_HARDEN_PTRACE
47476 + select GRKERNSEC_PTRACE_READEXEC
47477 + select GRKERNSEC_VM86 if (X86_32)
47478 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47479 + select PAX
47480 + select PAX_RANDUSTACK
47481 + select PAX_ASLR
47482 + select PAX_RANDMMAP
47483 + select PAX_NOEXEC
47484 + select PAX_MPROTECT
47485 + select PAX_EI_PAX
47486 + select PAX_PT_PAX_FLAGS
47487 + select PAX_HAVE_ACL_FLAGS
47488 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47489 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47490 + select PAX_RANDKSTACK if (X86_TSC && X86)
47491 + select PAX_SEGMEXEC if (X86_32)
47492 + select PAX_PAGEEXEC
47493 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47494 + select PAX_EMUTRAMP if (PARISC)
47495 + select PAX_EMUSIGRT if (PARISC)
47496 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47497 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47498 + select PAX_REFCOUNT if (X86 || SPARC64)
47499 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47500 + help
47501 + If you say Y here, many of the features of grsecurity will be
47502 + enabled, which will protect you against many kinds of attacks
47503 + against your system. The heightened security comes at a cost
47504 + of an increased chance of incompatibilities with rare software
47505 + on your machine. Since this security level enables PaX, you should
47506 + view <http://pax.grsecurity.net> and read about the PaX
47507 + project. While you are there, download chpax and run it on
47508 + binaries that cause problems with PaX. Also remember that
47509 + since the /proc restrictions are enabled, you must run your
47510 + identd as gid 1001. This security level enables the following
47511 + features in addition to those listed in the low and medium
47512 + security levels:
47513 +
47514 + - Additional /proc restrictions
47515 + - Chmod restrictions in chroot
47516 + - No signals, ptrace, or viewing of processes outside of chroot
47517 + - Capability restrictions in chroot
47518 + - Deny fchdir out of chroot
47519 + - Priority restrictions in chroot
47520 + - Segmentation-based implementation of PaX
47521 + - Mprotect restrictions
47522 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47523 + - Kernel stack randomization
47524 + - Mount/unmount/remount logging
47525 + - Kernel symbol hiding
47526 + - Hardening of module auto-loading
47527 + - Ptrace restrictions
47528 + - Restricted vm86 mode
47529 + - Restricted sysfs/debugfs
47530 + - Active kernel exploit response
47531 +
47532 +config GRKERNSEC_CUSTOM
47533 + bool "Custom"
47534 + help
47535 + If you say Y here, you will be able to configure every grsecurity
47536 + option, which allows you to enable many more features that aren't
47537 + covered in the basic security levels. These additional features
47538 + include TPE, socket restrictions, and the sysctl system for
47539 + grsecurity. It is advised that you read through the help for
47540 + each option to determine its usefulness in your situation.
47541 +
47542 +endchoice
47543 +
47544 +menu "Memory Protections"
47545 +depends on GRKERNSEC
47546 +
47547 +config GRKERNSEC_KMEM
47548 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47549 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47550 + help
47551 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47552 + be written to or read from to modify or leak the contents of the running
47553 + kernel. /dev/port will also not be allowed to be opened. If you have module
47554 + support disabled, enabling this will close up four ways that are
47555 + currently used to insert malicious code into the running kernel.
47556 + Even with all these features enabled, we still highly recommend that
47557 + you use the RBAC system, as it is still possible for an attacker to
47558 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47559 + If you are not using XFree86, you may be able to stop this additional
47560 + case by enabling the 'Disable privileged I/O' option. Though nothing
47561 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47562 + but only to video memory, which is the only writing we allow in this
47563 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, those
47564 + mappings will not be allowed to be mprotected with PROT_WRITE later.
47565 + It is highly recommended that you say Y here if you meet all the
47566 + conditions above.
47567 +
47568 +config GRKERNSEC_VM86
47569 + bool "Restrict VM86 mode"
47570 + depends on X86_32
47571 +
47572 + help
47573 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47574 + make use of a special execution mode on 32bit x86 processors called
47575 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47576 + video cards and will still work with this option enabled. The purpose
47577 + of the option is to prevent exploitation of emulation errors in
47578 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47579 + Nearly all users should be able to enable this option.
47580 +
47581 +config GRKERNSEC_IO
47582 + bool "Disable privileged I/O"
47583 + depends on X86
47584 + select RTC_CLASS
47585 + select RTC_INTF_DEV
47586 + select RTC_DRV_CMOS
47587 +
47588 + help
47589 + If you say Y here, all ioperm and iopl calls will return an error.
47590 + Ioperm and iopl can be used to modify the running kernel.
47591 + Unfortunately, some programs need this access to operate properly,
47592 + the most notable of which are XFree86 and hwclock. hwclock can be
47593 + remedied by having RTC support in the kernel, so real-time
47594 + clock support is enabled if this option is enabled, to ensure
47595 + that hwclock operates correctly. XFree86 still will not
47596 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47597 + IF YOU USE XFree86. If you use XFree86 and you still want to
47598 + protect your kernel against modification, use the RBAC system.
47599 +
47600 +config GRKERNSEC_PROC_MEMMAP
47601 + bool "Harden ASLR against information leaks and entropy reduction"
47602 + default y if (PAX_NOEXEC || PAX_ASLR)
47603 + depends on PAX_NOEXEC || PAX_ASLR
47604 + help
47605 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47606 + give no information about the addresses of its mappings if
47607 + PaX features that rely on random addresses are enabled on the task.
47608 + In addition to sanitizing this information and disabling other
47609 + dangerous sources of information, this option causes reads of sensitive
47610 + /proc/<pid> entries to be denied where the file descriptor was opened in
47611 + a different task than the one performing the read. Such attempts are logged.
47612 + Finally, this option limits argv/env strings for suid/sgid binaries
47613 + to 1MB to prevent a complete exhaustion of the stack entropy provided
47614 + by ASLR.
47615 + If you use PaX it is essential that you say Y here as it closes up
47616 + several holes that make full ASLR useless for suid/sgid binaries.
47617 +
47618 +config GRKERNSEC_BRUTE
47619 + bool "Deter exploit bruteforcing"
47620 + help
47621 + If you say Y here, attempts to bruteforce exploits against forking
47622 + daemons such as apache or sshd, as well as against suid/sgid binaries
47623 + will be deterred. When a child of a forking daemon is killed by PaX
47624 + or crashes due to an illegal instruction or other suspicious signal,
47625 + the parent process will be delayed 30 seconds upon every subsequent
47626 + fork until the administrator is able to assess the situation and
47627 + restart the daemon.
47628 + In the suid/sgid case, the attempt is logged, the user has all their
47629 + processes terminated, and they are prevented from executing any further
47630 + processes for 15 minutes.
47631 + It is recommended that you also enable signal logging in the auditing
47632 + section so that logs are generated when a process triggers a suspicious
47633 + signal.
47634 + If the sysctl option is enabled, a sysctl option with name
47635 + "deter_bruteforce" is created.
47636 +
47637 +
47638 +config GRKERNSEC_MODHARDEN
47639 + bool "Harden module auto-loading"
47640 + depends on MODULES
47641 + help
47642 + If you say Y here, module auto-loading in response to use of some
47643 + feature implemented by an unloaded module will be restricted to
47644 + root users. Enabling this option helps defend against attacks
47645 + by unprivileged users who abuse the auto-loading behavior to
47646 + cause a vulnerable module to load that is then exploited.
47647 +
47648 + If this option prevents a legitimate use of auto-loading for a
47649 + non-root user, the administrator can execute modprobe manually
47650 + with the exact name of the module mentioned in the alert log.
47651 + Alternatively, the administrator can add the module to the list
47652 + of modules loaded at boot by modifying init scripts.
47653 +
47654 + Modification of init scripts will most likely be needed on
47655 + Ubuntu servers with encrypted home directory support enabled,
47656 + as the first non-root user logging in will cause the ecb(aes),
47657 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47658 +
47659 +config GRKERNSEC_HIDESYM
47660 + bool "Hide kernel symbols"
47661 + help
47662 + If you say Y here, getting information on loaded modules, and
47663 + displaying all kernel symbols through a syscall will be restricted
47664 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47665 + /proc/kallsyms will be restricted to the root user. The RBAC
47666 + system can hide that entry even from root.
47667 +
47668 + This option also prevents leaking of kernel addresses through
47669 + several /proc entries.
47670 +
47671 + Note that this option is only effective provided the following
47672 + conditions are met:
47673 + 1) The kernel using grsecurity is not precompiled by some distribution
47674 + 2) You have also enabled GRKERNSEC_DMESG
47675 + 3) You are using the RBAC system and hiding other files such as your
47676 + kernel image and System.map. Alternatively, enabling this option
47677 + causes the permissions on /boot, /lib/modules, and the kernel
47678 + source directory to change at compile time to prevent
47679 + reading by non-root users.
47680 + If the above conditions are met, this option will aid in providing a
47681 + useful protection against local kernel exploitation of overflows
47682 + and arbitrary read/write vulnerabilities.
47683 +
47684 +config GRKERNSEC_KERN_LOCKOUT
47685 + bool "Active kernel exploit response"
47686 + depends on X86 || ARM || PPC || SPARC
47687 + help
47688 + If you say Y here, when a PaX alert is triggered due to suspicious
47689 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47690 + or an OOPs occurs due to bad memory accesses, instead of just
47691 + terminating the offending process (and potentially allowing
47692 + a subsequent exploit from the same user), we will take one of two
47693 + actions:
47694 + If the user was root, we will panic the system
47695 + If the user was non-root, we will log the attempt, terminate
47696 + all processes owned by the user, then prevent them from creating
47697 + any new processes until the system is restarted
47698 + This deters repeated kernel exploitation/bruteforcing attempts
47699 + and is useful for later forensics.
47700 +
47701 +endmenu
47702 +menu "Role Based Access Control Options"
47703 +depends on GRKERNSEC
47704 +
47705 +config GRKERNSEC_RBAC_DEBUG
47706 + bool
47707 +
47708 +config GRKERNSEC_NO_RBAC
47709 + bool "Disable RBAC system"
47710 + help
47711 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47712 + preventing the RBAC system from being enabled. You should only say Y
47713 + here if you have no intention of using the RBAC system, so as to prevent
47714 + an attacker with root access from misusing the RBAC system to hide files
47715 + and processes when loadable module support and /dev/[k]mem have been
47716 + locked down.
47717 +
47718 +config GRKERNSEC_ACL_HIDEKERN
47719 + bool "Hide kernel processes"
47720 + help
47721 + If you say Y here, all kernel threads will be hidden to all
47722 + processes but those whose subject has the "view hidden processes"
47723 + flag.
47724 +
47725 +config GRKERNSEC_ACL_MAXTRIES
47726 + int "Maximum tries before password lockout"
47727 + default 3
47728 + help
47729 + This option enforces the maximum number of times a user can attempt
47730 + to authorize themselves with the grsecurity RBAC system before being
47731 + denied the ability to attempt authorization again for a specified time.
47732 + The lower the number, the harder it will be to brute-force a password.
47733 +
47734 +config GRKERNSEC_ACL_TIMEOUT
47735 + int "Time to wait after max password tries, in seconds"
47736 + default 30
47737 + help
47738 + This option specifies the time the user must wait after attempting to
47739 + authorize to the RBAC system with the maximum number of invalid
47740 + passwords. The higher the number, the harder it will be to brute-force
47741 + a password.
47742 +
47743 +endmenu
47744 +menu "Filesystem Protections"
47745 +depends on GRKERNSEC
47746 +
47747 +config GRKERNSEC_PROC
47748 + bool "Proc restrictions"
47749 + help
47750 + If you say Y here, the permissions of the /proc filesystem
47751 + will be altered to enhance system security and privacy. You MUST
47752 + choose either a user only restriction or a user and group restriction.
47753 + Depending upon the option you choose, you can either restrict users to
47754 + see only the processes they themselves run, or choose a group that can
47755 + view all processes and files normally restricted to root if you choose
47756 + the "restrict to user only" option. NOTE: If you're running identd as
47757 + a non-root user, you will have to run it as the group you specify here.
47758 +
47759 +config GRKERNSEC_PROC_USER
47760 + bool "Restrict /proc to user only"
47761 + depends on GRKERNSEC_PROC
47762 + help
47763 + If you say Y here, non-root users will only be able to view their own
47764 + processes, and restricts them from viewing network-related information,
47765 + and viewing kernel symbol and module information.
47766 +
47767 +config GRKERNSEC_PROC_USERGROUP
47768 + bool "Allow special group"
47769 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47770 + help
47771 + If you say Y here, you will be able to select a group that will be
47772 + able to view all processes and network-related information. If you've
47773 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47774 + remain hidden. This option is useful if you want to run identd as
47775 + a non-root user.
47776 +
47777 +config GRKERNSEC_PROC_GID
47778 + int "GID for special group"
47779 + depends on GRKERNSEC_PROC_USERGROUP
47780 + default 1001
47781 +
47782 +config GRKERNSEC_PROC_ADD
47783 + bool "Additional restrictions"
47784 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47785 + help
47786 + If you say Y here, additional restrictions will be placed on
47787 + /proc that keep normal users from viewing device information and
47788 + slabinfo information that could be useful for exploits.
47789 +
47790 +config GRKERNSEC_LINK
47791 + bool "Linking restrictions"
47792 + help
47793 + If you say Y here, /tmp race exploits will be prevented, since users
47794 + will no longer be able to follow symlinks owned by other users in
47795 + world-writable +t directories (e.g. /tmp), unless the owner of the
47796 + symlink is the owner of the directory. Users will also not be
47797 + able to hardlink to files they do not own. If the sysctl option is
47798 + enabled, a sysctl option with name "linking_restrictions" is created.
47799 +
47800 +config GRKERNSEC_FIFO
47801 + bool "FIFO restrictions"
47802 + help
47803 + If you say Y here, users will not be able to write to FIFOs they don't
47804 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47805 + the FIFO is the same owner of the directory it's held in. If the sysctl
47806 + option is enabled, a sysctl option with name "fifo_restrictions" is
47807 + created.
47808 +
47809 +config GRKERNSEC_SYSFS_RESTRICT
47810 + bool "Sysfs/debugfs restriction"
47811 + depends on SYSFS
47812 + help
47813 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47814 + any filesystem normally mounted under it (e.g. debugfs) will be
47815 + mostly accessible only by root. These filesystems generally provide access
47816 + to hardware and debug information that isn't appropriate for unprivileged
47817 + users of the system. Sysfs and debugfs have also become a large source
47818 + of new vulnerabilities, ranging from infoleaks to local compromise.
47819 + There has been very little oversight with an eye toward security involved
47820 + in adding new exporters of information to these filesystems, so their
47821 + use is discouraged.
47822 + For reasons of compatibility, a few directories have been whitelisted
47823 + for access by non-root users:
47824 + /sys/fs/selinux
47825 + /sys/fs/fuse
47826 + /sys/devices/system/cpu
47827 +
47828 +config GRKERNSEC_ROFS
47829 + bool "Runtime read-only mount protection"
47830 + help
47831 + If you say Y here, a sysctl option with name "romount_protect" will
47832 + be created. By setting this option to 1 at runtime, filesystems
47833 + will be protected in the following ways:
47834 + * No new writable mounts will be allowed
47835 + * Existing read-only mounts won't be able to be remounted read/write
47836 + * Write operations will be denied on all block devices
47837 + This option acts independently of grsec_lock: once it is set to 1,
47838 + it cannot be turned off. Therefore, please be mindful of the resulting
47839 + behavior if this option is enabled in an init script on a read-only
47840 + filesystem. This feature is mainly intended for secure embedded systems.
47841 +
47842 +config GRKERNSEC_CHROOT
47843 + bool "Chroot jail restrictions"
47844 + help
47845 + If you say Y here, you will be able to choose several options that will
47846 + make breaking out of a chrooted jail much more difficult. If you
47847 + encounter no software incompatibilities with the following options, it
47848 + is recommended that you enable each one.
47849 +
47850 +config GRKERNSEC_CHROOT_MOUNT
47851 + bool "Deny mounts"
47852 + depends on GRKERNSEC_CHROOT
47853 + help
47854 + If you say Y here, processes inside a chroot will not be able to
47855 + mount or remount filesystems. If the sysctl option is enabled, a
47856 + sysctl option with name "chroot_deny_mount" is created.
47857 +
47858 +config GRKERNSEC_CHROOT_DOUBLE
47859 + bool "Deny double-chroots"
47860 + depends on GRKERNSEC_CHROOT
47861 + help
47862 + If you say Y here, processes inside a chroot will not be able to chroot
47863 + again outside the chroot. This is a widely used method of breaking
47864 + out of a chroot jail and should not be allowed. If the sysctl
47865 + option is enabled, a sysctl option with name
47866 + "chroot_deny_chroot" is created.
47867 +
47868 +config GRKERNSEC_CHROOT_PIVOT
47869 + bool "Deny pivot_root in chroot"
47870 + depends on GRKERNSEC_CHROOT
47871 + help
47872 + If you say Y here, processes inside a chroot will not be able to use
47873 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47874 + works similar to chroot in that it changes the root filesystem. This
47875 + function could be misused in a chrooted process to attempt to break out
47876 + of the chroot, and therefore should not be allowed. If the sysctl
47877 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47878 + created.
47879 +
47880 +config GRKERNSEC_CHROOT_CHDIR
47881 + bool "Enforce chdir(\"/\") on all chroots"
47882 + depends on GRKERNSEC_CHROOT
47883 + help
47884 + If you say Y here, the current working directory of all newly-chrooted
47885 + applications will be set to the root directory of the chroot.
47886 + The man page on chroot(2) states:
47887 + Note that this call does not change the current working
47888 + directory, so that `.' can be outside the tree rooted at
47889 + `/'. In particular, the super-user can escape from a
47890 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47891 +
47892 + It is recommended that you say Y here, since it's not known to break
47893 + any software. If the sysctl option is enabled, a sysctl option with
47894 + name "chroot_enforce_chdir" is created.
47895 +
47896 +config GRKERNSEC_CHROOT_CHMOD
47897 + bool "Deny (f)chmod +s"
47898 + depends on GRKERNSEC_CHROOT
47899 + help
47900 + If you say Y here, processes inside a chroot will not be able to chmod
47901 + or fchmod files to make them have suid or sgid bits. This protects
47902 + against another published method of breaking a chroot. If the sysctl
47903 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47904 + created.
47905 +
47906 +config GRKERNSEC_CHROOT_FCHDIR
47907 + bool "Deny fchdir out of chroot"
47908 + depends on GRKERNSEC_CHROOT
47909 + help
47910 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47911 + to a file descriptor of the chrooting process that points to a directory
47912 + outside the filesystem will be stopped. If the sysctl option
47913 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47914 +
47915 +config GRKERNSEC_CHROOT_MKNOD
47916 + bool "Deny mknod"
47917 + depends on GRKERNSEC_CHROOT
47918 + help
47919 + If you say Y here, processes inside a chroot will not be allowed to
47920 + mknod. The problem with using mknod inside a chroot is that it
47921 + would allow an attacker to create a device entry that is the same
47922 + as one on the physical root of your system, which could range from
47923 + anything from the console device to a device for your harddrive (which
47924 + they could then use to wipe the drive or steal data). It is recommended
47925 + that you say Y here, unless you run into software incompatibilities.
47926 + If the sysctl option is enabled, a sysctl option with name
47927 + "chroot_deny_mknod" is created.
47928 +
47929 +config GRKERNSEC_CHROOT_SHMAT
47930 + bool "Deny shmat() out of chroot"
47931 + depends on GRKERNSEC_CHROOT
47932 + help
47933 + If you say Y here, processes inside a chroot will not be able to attach
47934 + to shared memory segments that were created outside of the chroot jail.
47935 + It is recommended that you say Y here. If the sysctl option is enabled,
47936 + a sysctl option with name "chroot_deny_shmat" is created.
47937 +
47938 +config GRKERNSEC_CHROOT_UNIX
47939 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47940 + depends on GRKERNSEC_CHROOT
47941 + help
47942 + If you say Y here, processes inside a chroot will not be able to
47943 + connect to abstract (meaning not belonging to a filesystem) Unix
47944 + domain sockets that were bound outside of a chroot. It is recommended
47945 + that you say Y here. If the sysctl option is enabled, a sysctl option
47946 + with name "chroot_deny_unix" is created.
47947 +
47948 +config GRKERNSEC_CHROOT_FINDTASK
47949 + bool "Protect outside processes"
47950 + depends on GRKERNSEC_CHROOT
47951 + help
47952 + If you say Y here, processes inside a chroot will not be able to
47953 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47954 + getsid, or view any process outside of the chroot. If the sysctl
47955 + option is enabled, a sysctl option with name "chroot_findtask" is
47956 + created.
47957 +
47958 +config GRKERNSEC_CHROOT_NICE
47959 + bool "Restrict priority changes"
47960 + depends on GRKERNSEC_CHROOT
47961 + help
47962 + If you say Y here, processes inside a chroot will not be able to raise
47963 + the priority of processes in the chroot, or alter the priority of
47964 + processes outside the chroot. This provides more security than simply
47965 + removing CAP_SYS_NICE from the process' capability set. If the
47966 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47967 + is created.
47968 +
47969 +config GRKERNSEC_CHROOT_SYSCTL
47970 + bool "Deny sysctl writes"
47971 + depends on GRKERNSEC_CHROOT
47972 + help
47973 + If you say Y here, an attacker in a chroot will not be able to
47974 + write to sysctl entries, either by sysctl(2) or through a /proc
47975 + interface. It is strongly recommended that you say Y here. If the
47976 + sysctl option is enabled, a sysctl option with name
47977 + "chroot_deny_sysctl" is created.
47978 +
47979 +config GRKERNSEC_CHROOT_CAPS
47980 + bool "Capability restrictions"
47981 + depends on GRKERNSEC_CHROOT
47982 + help
47983 + If you say Y here, the capabilities on all processes within a
47984 + chroot jail will be lowered to stop module insertion, raw i/o,
47985 + system and net admin tasks, rebooting the system, modifying immutable
47986 + files, modifying IPC owned by another, and changing the system time.
47987 + This is left an option because it can break some apps. Disable this
47988 + if your chrooted apps are having problems performing those kinds of
47989 + tasks. If the sysctl option is enabled, a sysctl option with
47990 + name "chroot_caps" is created.
47991 +
47992 +endmenu
47993 +menu "Kernel Auditing"
47994 +depends on GRKERNSEC
47995 +
47996 +config GRKERNSEC_AUDIT_GROUP
47997 + bool "Single group for auditing"
47998 + help
47999 + If you say Y here, the exec, chdir, and (un)mount logging features
48000 + will only operate on a group you specify. This option is recommended
48001 + if you only want to watch certain users instead of having a large
48002 + amount of logs from the entire system. If the sysctl option is enabled,
48003 + a sysctl option with name "audit_group" is created.
48004 +
48005 +config GRKERNSEC_AUDIT_GID
48006 + int "GID for auditing"
48007 + depends on GRKERNSEC_AUDIT_GROUP
48008 + default 1007
48009 +
48010 +config GRKERNSEC_EXECLOG
48011 + bool "Exec logging"
48012 + help
48013 + If you say Y here, all execve() calls will be logged (since the
48014 + other exec*() calls are frontends to execve(), all execution
48015 + will be logged). Useful for shell-servers that like to keep track
48016 + of their users. If the sysctl option is enabled, a sysctl option with
48017 + name "exec_logging" is created.
48018 + WARNING: This option when enabled will produce a LOT of logs, especially
48019 + on an active system.
48020 +
48021 +config GRKERNSEC_RESLOG
48022 + bool "Resource logging"
48023 + help
48024 + If you say Y here, all attempts to overstep resource limits will
48025 + be logged with the resource name, the requested size, and the current
48026 + limit. It is highly recommended that you say Y here. If the sysctl
48027 + option is enabled, a sysctl option with name "resource_logging" is
48028 + created. If the RBAC system is enabled, the sysctl value is ignored.
48029 +
48030 +config GRKERNSEC_CHROOT_EXECLOG
48031 + bool "Log execs within chroot"
48032 + help
48033 + If you say Y here, all executions inside a chroot jail will be logged
48034 + to syslog. This can cause a large amount of logs if certain
48035 + applications (eg. djb's daemontools) are installed on the system, and
48036 + is therefore left as an option. If the sysctl option is enabled, a
48037 + sysctl option with name "chroot_execlog" is created.
48038 +
48039 +config GRKERNSEC_AUDIT_PTRACE
48040 + bool "Ptrace logging"
48041 + help
48042 + If you say Y here, all attempts to attach to a process via ptrace
48043 + will be logged. If the sysctl option is enabled, a sysctl option
48044 + with name "audit_ptrace" is created.
48045 +
48046 +config GRKERNSEC_AUDIT_CHDIR
48047 + bool "Chdir logging"
48048 + help
48049 + If you say Y here, all chdir() calls will be logged. If the sysctl
48050 + option is enabled, a sysctl option with name "audit_chdir" is created.
48051 +
48052 +config GRKERNSEC_AUDIT_MOUNT
48053 + bool "(Un)Mount logging"
48054 + help
48055 + If you say Y here, all mounts and unmounts will be logged. If the
48056 + sysctl option is enabled, a sysctl option with name "audit_mount" is
48057 + created.
48058 +
48059 +config GRKERNSEC_SIGNAL
48060 + bool "Signal logging"
48061 + help
48062 + If you say Y here, certain important signals will be logged, such as
48063 + SIGSEGV, which will as a result inform you when an error in a program
48064 + occurred, which in some cases could mean a possible exploit attempt.
48065 + If the sysctl option is enabled, a sysctl option with name
48066 + "signal_logging" is created.
48067 +
48068 +config GRKERNSEC_FORKFAIL
48069 + bool "Fork failure logging"
48070 + help
48071 + If you say Y here, all failed fork() attempts will be logged.
48072 + This could suggest a fork bomb, or someone attempting to overstep
48073 + their process limit. If the sysctl option is enabled, a sysctl option
48074 + with name "forkfail_logging" is created.
48075 +
48076 +config GRKERNSEC_TIME
48077 + bool "Time change logging"
48078 + help
48079 + If you say Y here, any changes of the system clock will be logged.
48080 + If the sysctl option is enabled, a sysctl option with name
48081 + "timechange_logging" is created.
48082 +
48083 +config GRKERNSEC_PROC_IPADDR
48084 + bool "/proc/<pid>/ipaddr support"
48085 + help
48086 + If you say Y here, a new entry will be added to each /proc/<pid>
48087 + directory that contains the IP address of the person using the task.
48088 + The IP is carried across local TCP and AF_UNIX stream sockets.
48089 + This information can be useful for IDS/IPSes to perform remote response
48090 + to a local attack. The entry is readable by only the owner of the
48091 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
48092 + the RBAC system), and thus does not create privacy concerns.
48093 +
48094 +config GRKERNSEC_RWXMAP_LOG
48095 + bool 'Denied RWX mmap/mprotect logging'
48096 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
48097 + help
48098 + If you say Y here, calls to mmap() and mprotect() with explicit
48099 + usage of PROT_WRITE and PROT_EXEC together will be logged when
48100 + denied by the PAX_MPROTECT feature. If the sysctl option is
48101 + enabled, a sysctl option with name "rwxmap_logging" is created.
48102 +
48103 +config GRKERNSEC_AUDIT_TEXTREL
48104 + bool 'ELF text relocations logging (READ HELP)'
48105 + depends on PAX_MPROTECT
48106 + help
48107 + If you say Y here, text relocations will be logged with the filename
48108 + of the offending library or binary. The purpose of the feature is
48109 + to help Linux distribution developers get rid of libraries and
48110 + binaries that need text relocations which hinder the future progress
48111 + of PaX. Only Linux distribution developers should say Y here, and
48112 + never on a production machine, as this option creates an information
48113 + leak that could aid an attacker in defeating the randomization of
48114 + a single memory region. If the sysctl option is enabled, a sysctl
48115 + option with name "audit_textrel" is created.
48116 +
48117 +endmenu
48118 +
48119 +menu "Executable Protections"
48120 +depends on GRKERNSEC
48121 +
48122 +config GRKERNSEC_DMESG
48123 + bool "Dmesg(8) restriction"
48124 + help
48125 + If you say Y here, non-root users will not be able to use dmesg(8)
48126 + to view up to the last 4kb of messages in the kernel's log buffer.
48127 + The kernel's log buffer often contains kernel addresses and other
48128 + identifying information useful to an attacker in fingerprinting a
48129 + system for a targeted exploit.
48130 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
48131 + created.
48132 +
48133 +config GRKERNSEC_HARDEN_PTRACE
48134 + bool "Deter ptrace-based process snooping"
48135 + help
48136 + If you say Y here, TTY sniffers and other malicious monitoring
48137 + programs implemented through ptrace will be defeated. If you
48138 + have been using the RBAC system, this option has already been
48139 + enabled for several years for all users, with the ability to make
48140 + fine-grained exceptions.
48141 +
48142 + This option only affects the ability of non-root users to ptrace
48143 + processes that are not a descendent of the ptracing process.
48144 + This means that strace ./binary and gdb ./binary will still work,
48145 + but attaching to arbitrary processes will not. If the sysctl
48146 + option is enabled, a sysctl option with name "harden_ptrace" is
48147 + created.
48148 +
48149 +config GRKERNSEC_PTRACE_READEXEC
48150 + bool "Require read access to ptrace sensitive binaries"
48151 + help
48152 + If you say Y here, unprivileged users will not be able to ptrace unreadable
48153 + binaries. This option is useful in environments that
48154 + remove the read bits (e.g. file mode 4711) from suid binaries to
48155 + prevent infoleaking of their contents. This option adds
48156 + consistency to the use of that file mode, as the binary could normally
48157 + be read out when run without privileges while ptracing.
48158 +
48159 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
48160 + is created.
48161 +
48162 +config GRKERNSEC_SETXID
48163 + bool "Enforce consistent multithreaded privileges"
48164 + help
48165 + If you say Y here, a change from a root uid to a non-root uid
48166 + in a multithreaded application will cause the resulting uids,
48167 + gids, supplementary groups, and capabilities in that thread
48168 + to be propagated to the other threads of the process. In most
48169 + cases this is unnecessary, as glibc will emulate this behavior
48170 + on behalf of the application. Other libcs do not act in the
48171 + same way, allowing the other threads of the process to continue
48172 + running with root privileges. If the sysctl option is enabled,
48173 + a sysctl option with name "consistent_setxid" is created.
48174 +
48175 +config GRKERNSEC_TPE
48176 + bool "Trusted Path Execution (TPE)"
48177 + help
48178 + If you say Y here, you will be able to choose a gid to add to the
48179 + supplementary groups of users you want to mark as "untrusted."
48180 + These users will not be able to execute any files that are not in
48181 + root-owned directories writable only by root. If the sysctl option
48182 + is enabled, a sysctl option with name "tpe" is created.
48183 +
48184 +config GRKERNSEC_TPE_ALL
48185 + bool "Partially restrict all non-root users"
48186 + depends on GRKERNSEC_TPE
48187 + help
48188 + If you say Y here, all non-root users will be covered under
48189 + a weaker TPE restriction. This is separate from, and in addition to,
48190 + the main TPE options that you have selected elsewhere. Thus, if a
48191 + "trusted" GID is chosen, this restriction applies to even that GID.
48192 + Under this restriction, all non-root users will only be allowed to
48193 + execute files in directories they own that are not group or
48194 + world-writable, or in directories owned by root and writable only by
48195 + root. If the sysctl option is enabled, a sysctl option with name
48196 + "tpe_restrict_all" is created.
48197 +
48198 +config GRKERNSEC_TPE_INVERT
48199 + bool "Invert GID option"
48200 + depends on GRKERNSEC_TPE
48201 + help
48202 + If you say Y here, the group you specify in the TPE configuration will
48203 + decide what group TPE restrictions will be *disabled* for. This
48204 + option is useful if you want TPE restrictions to be applied to most
48205 + users on the system. If the sysctl option is enabled, a sysctl option
48206 + with name "tpe_invert" is created. Unlike other sysctl options, this
48207 + entry will default to on for backward-compatibility.
48208 +
48209 +config GRKERNSEC_TPE_GID
48210 + int "GID for untrusted users"
48211 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48212 + default 1005
48213 + help
48214 + Setting this GID determines what group TPE restrictions will be
48215 + *enabled* for. If the sysctl option is enabled, a sysctl option
48216 + with name "tpe_gid" is created.
48217 +
48218 +config GRKERNSEC_TPE_GID
48219 + int "GID for trusted users"
48220 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48221 + default 1005
48222 + help
48223 + Setting this GID determines what group TPE restrictions will be
48224 + *disabled* for. If the sysctl option is enabled, a sysctl option
48225 + with name "tpe_gid" is created.
48226 +
48227 +endmenu
48228 +menu "Network Protections"
48229 +depends on GRKERNSEC
48230 +
48231 +config GRKERNSEC_RANDNET
48232 + bool "Larger entropy pools"
48233 + help
48234 + If you say Y here, the entropy pools used for many features of Linux
48235 + and grsecurity will be doubled in size. Since several grsecurity
48236 + features use additional randomness, it is recommended that you say Y
48237 + here. Saying Y here has a similar effect as modifying
48238 + /proc/sys/kernel/random/poolsize.
48239 +
48240 +config GRKERNSEC_BLACKHOLE
48241 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48242 + depends on NET
48243 + help
48244 + If you say Y here, neither TCP resets nor ICMP
48245 + destination-unreachable packets will be sent in response to packets
48246 + sent to ports for which no associated listening process exists.
48247 + This feature supports both IPV4 and IPV6 and exempts the
48248 + loopback interface from blackholing. Enabling this feature
48249 + makes a host more resilient to DoS attacks and reduces network
48250 + visibility against scanners.
48251 +
48252 + The blackhole feature as-implemented is equivalent to the FreeBSD
48253 + blackhole feature, as it prevents RST responses to all packets, not
48254 + just SYNs. Under most application behavior this causes no
48255 + problems, but applications (like haproxy) may not close certain
48256 + connections in a way that cleanly terminates them on the remote
48257 + end, leaving the remote host in LAST_ACK state. Because of this
48258 + side-effect and to prevent intentional LAST_ACK DoSes, this
48259 + feature also adds automatic mitigation against such attacks.
48260 + The mitigation drastically reduces the amount of time a socket
48261 + can spend in LAST_ACK state. If you're using haproxy and not
48262 + all servers it connects to have this option enabled, consider
48263 + disabling this feature on the haproxy host.
48264 +
48265 + If the sysctl option is enabled, two sysctl options with names
48266 + "ip_blackhole" and "lastack_retries" will be created.
48267 + While "ip_blackhole" takes the standard zero/non-zero on/off
48268 + toggle, "lastack_retries" uses the same kinds of values as
48269 + "tcp_retries1" and "tcp_retries2". The default value of 4
48270 + prevents a socket from lasting more than 45 seconds in LAST_ACK
48271 + state.
48272 +
48273 +config GRKERNSEC_SOCKET
48274 + bool "Socket restrictions"
48275 + depends on NET
48276 + help
48277 + If you say Y here, you will be able to choose from several options.
48278 + If you assign a GID on your system and add it to the supplementary
48279 + groups of users you want to restrict socket access to, this patch
48280 + will perform up to three things, based on the option(s) you choose.
48281 +
48282 +config GRKERNSEC_SOCKET_ALL
48283 + bool "Deny any sockets to group"
48284 + depends on GRKERNSEC_SOCKET
48285 + help
48286 + If you say Y here, you will be able to choose a GID of whose users will
48287 + be unable to connect to other hosts from your machine or run server
48288 + applications from your machine. If the sysctl option is enabled, a
48289 + sysctl option with name "socket_all" is created.
48290 +
48291 +config GRKERNSEC_SOCKET_ALL_GID
48292 + int "GID to deny all sockets for"
48293 + depends on GRKERNSEC_SOCKET_ALL
48294 + default 1004
48295 + help
48296 + Here you can choose the GID to disable socket access for. Remember to
48297 + add the users you want socket access disabled for to the GID
48298 + specified here. If the sysctl option is enabled, a sysctl option
48299 + with name "socket_all_gid" is created.
48300 +
48301 +config GRKERNSEC_SOCKET_CLIENT
48302 + bool "Deny client sockets to group"
48303 + depends on GRKERNSEC_SOCKET
48304 + help
48305 + If you say Y here, you will be able to choose a GID of whose users will
48306 + be unable to connect to other hosts from your machine, but will be
48307 + able to run servers. If this option is enabled, all users in the group
48308 + you specify will have to use passive mode when initiating ftp transfers
48309 + from the shell on your machine. If the sysctl option is enabled, a
48310 + sysctl option with name "socket_client" is created.
48311 +
48312 +config GRKERNSEC_SOCKET_CLIENT_GID
48313 + int "GID to deny client sockets for"
48314 + depends on GRKERNSEC_SOCKET_CLIENT
48315 + default 1003
48316 + help
48317 + Here you can choose the GID to disable client socket access for.
48318 + Remember to add the users you want client socket access disabled for to
48319 + the GID specified here. If the sysctl option is enabled, a sysctl
48320 + option with name "socket_client_gid" is created.
48321 +
48322 +config GRKERNSEC_SOCKET_SERVER
48323 + bool "Deny server sockets to group"
48324 + depends on GRKERNSEC_SOCKET
48325 + help
48326 + If you say Y here, you will be able to choose a GID of whose users will
48327 + be unable to run server applications from your machine. If the sysctl
48328 + option is enabled, a sysctl option with name "socket_server" is created.
48329 +
48330 +config GRKERNSEC_SOCKET_SERVER_GID
48331 + int "GID to deny server sockets for"
48332 + depends on GRKERNSEC_SOCKET_SERVER
48333 + default 1002
48334 + help
48335 + Here you can choose the GID to disable server socket access for.
48336 + Remember to add the users you want server socket access disabled for to
48337 + the GID specified here. If the sysctl option is enabled, a sysctl
48338 + option with name "socket_server_gid" is created.
48339 +
48340 +endmenu
48341 +menu "Sysctl support"
48342 +depends on GRKERNSEC && SYSCTL
48343 +
48344 +config GRKERNSEC_SYSCTL
48345 + bool "Sysctl support"
48346 + help
48347 + If you say Y here, you will be able to change the options that
48348 + grsecurity runs with at bootup, without having to recompile your
48349 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48350 + to enable (1) or disable (0) various features. All the sysctl entries
48351 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48352 + All features enabled in the kernel configuration are disabled at boot
48353 + if you do not say Y to the "Turn on features by default" option.
48354 + All options should be set at startup, and the grsec_lock entry should
48355 + be set to a non-zero value after all the options are set.
48356 + *THIS IS EXTREMELY IMPORTANT*
48357 +
48358 +config GRKERNSEC_SYSCTL_DISTRO
48359 + bool "Extra sysctl support for distro makers (READ HELP)"
48360 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48361 + help
48362 + If you say Y here, additional sysctl options will be created
48363 + for features that affect processes running as root. Therefore,
48364 + it is critical when using this option that the grsec_lock entry be
48365 + enabled after boot. Only distros with prebuilt kernel packages
48366 + with this option enabled that can ensure grsec_lock is enabled
48367 + after boot should use this option.
48368 + *Failure to set grsec_lock after boot makes all grsec features
48369 + this option covers useless*
48370 +
48371 + Currently this option creates the following sysctl entries:
48372 + "Disable Privileged I/O": "disable_priv_io"
48373 +
48374 +config GRKERNSEC_SYSCTL_ON
48375 + bool "Turn on features by default"
48376 + depends on GRKERNSEC_SYSCTL
48377 + help
48378 + If you say Y here, instead of having all features enabled in the
48379 + kernel configuration disabled at boot time, the features will be
48380 + enabled at boot time. It is recommended you say Y here unless
48381 + there is some reason you would want all sysctl-tunable features to
48382 + be disabled by default. As mentioned elsewhere, it is important
48383 + to enable the grsec_lock entry once you have finished modifying
48384 + the sysctl entries.
48385 +
48386 +endmenu
48387 +menu "Logging Options"
48388 +depends on GRKERNSEC
48389 +
48390 +config GRKERNSEC_FLOODTIME
48391 + int "Seconds in between log messages (minimum)"
48392 + default 10
48393 + help
48394 + This option allows you to enforce the number of seconds between
48395 + grsecurity log messages. The default should be suitable for most
48396 + people, however, if you choose to change it, choose a value small enough
48397 + to allow informative logs to be produced, but large enough to
48398 + prevent flooding.
48399 +
48400 +config GRKERNSEC_FLOODBURST
48401 + int "Number of messages in a burst (maximum)"
48402 + default 6
48403 + help
48404 + This option allows you to choose the maximum number of messages allowed
48405 + within the flood time interval you chose in a separate option. The
48406 + default should be suitable for most people, however if you find that
48407 + many of your logs are being interpreted as flooding, you may want to
48408 + raise this value.
48409 +
48410 +endmenu
48411 +
48412 +endmenu
48413 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48414 new file mode 100644
48415 index 0000000..496e60d
48416 --- /dev/null
48417 +++ b/grsecurity/Makefile
48418 @@ -0,0 +1,40 @@
48419 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48420 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48421 +# into an RBAC system
48422 +#
48423 +# All code in this directory and various hooks inserted throughout the kernel
48424 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48425 +# under the GPL v2 or higher
48426 +
48427 +ifndef CONFIG_IA64
48428 +KBUILD_CFLAGS += -Werror
48429 +endif
48430 +
48431 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48432 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48433 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48434 +
48435 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48436 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48437 + gracl_learn.o grsec_log.o
48438 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48439 +
48440 +ifdef CONFIG_NET
48441 +obj-y += grsec_sock.o
48442 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48443 +endif
48444 +
48445 +ifndef CONFIG_GRKERNSEC
48446 +obj-y += grsec_disabled.o
48447 +endif
48448 +
48449 +ifdef CONFIG_GRKERNSEC_HIDESYM
48450 +extra-y := grsec_hidesym.o
48451 +$(obj)/grsec_hidesym.o:
48452 + @-chmod -f 500 /boot
48453 + @-chmod -f 500 /lib/modules
48454 + @-chmod -f 500 /lib64/modules
48455 + @-chmod -f 500 /lib32/modules
48456 + @-chmod -f 700 .
48457 + @echo ' grsec: protected kernel image paths'
48458 +endif
48459 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48460 new file mode 100644
48461 index 0000000..7715893
48462 --- /dev/null
48463 +++ b/grsecurity/gracl.c
48464 @@ -0,0 +1,4164 @@
48465 +#include <linux/kernel.h>
48466 +#include <linux/module.h>
48467 +#include <linux/sched.h>
48468 +#include <linux/mm.h>
48469 +#include <linux/file.h>
48470 +#include <linux/fs.h>
48471 +#include <linux/namei.h>
48472 +#include <linux/mount.h>
48473 +#include <linux/tty.h>
48474 +#include <linux/proc_fs.h>
48475 +#include <linux/lglock.h>
48476 +#include <linux/slab.h>
48477 +#include <linux/vmalloc.h>
48478 +#include <linux/types.h>
48479 +#include <linux/sysctl.h>
48480 +#include <linux/netdevice.h>
48481 +#include <linux/ptrace.h>
48482 +#include <linux/gracl.h>
48483 +#include <linux/gralloc.h>
48484 +#include <linux/security.h>
48485 +#include <linux/grinternal.h>
48486 +#include <linux/pid_namespace.h>
48487 +#include <linux/fdtable.h>
48488 +#include <linux/percpu.h>
48489 +
48490 +#include <asm/uaccess.h>
48491 +#include <asm/errno.h>
48492 +#include <asm/mman.h>
48493 +
48494 +static struct acl_role_db acl_role_set;
48495 +static struct name_db name_set;
48496 +static struct inodev_db inodev_set;
48497 +
48498 +/* for keeping track of userspace pointers used for subjects, so we
48499 + can share references in the kernel as well
48500 +*/
48501 +
48502 +static struct path real_root;
48503 +
48504 +static struct acl_subj_map_db subj_map_set;
48505 +
48506 +static struct acl_role_label *default_role;
48507 +
48508 +static struct acl_role_label *role_list;
48509 +
48510 +static u16 acl_sp_role_value;
48511 +
48512 +extern char *gr_shared_page[4];
48513 +static DEFINE_MUTEX(gr_dev_mutex);
48514 +DEFINE_RWLOCK(gr_inode_lock);
48515 +
48516 +struct gr_arg *gr_usermode;
48517 +
48518 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48519 +
48520 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48521 +extern void gr_clear_learn_entries(void);
48522 +
48523 +#ifdef CONFIG_GRKERNSEC_RESLOG
48524 +extern void gr_log_resource(const struct task_struct *task,
48525 + const int res, const unsigned long wanted, const int gt);
48526 +#endif
48527 +
48528 +unsigned char *gr_system_salt;
48529 +unsigned char *gr_system_sum;
48530 +
48531 +static struct sprole_pw **acl_special_roles = NULL;
48532 +static __u16 num_sprole_pws = 0;
48533 +
48534 +static struct acl_role_label *kernel_role = NULL;
48535 +
48536 +static unsigned int gr_auth_attempts = 0;
48537 +static unsigned long gr_auth_expires = 0UL;
48538 +
48539 +#ifdef CONFIG_NET
48540 +extern struct vfsmount *sock_mnt;
48541 +#endif
48542 +
48543 +extern struct vfsmount *pipe_mnt;
48544 +extern struct vfsmount *shm_mnt;
48545 +#ifdef CONFIG_HUGETLBFS
48546 +extern struct vfsmount *hugetlbfs_vfsmount;
48547 +#endif
48548 +
48549 +static struct acl_object_label *fakefs_obj_rw;
48550 +static struct acl_object_label *fakefs_obj_rwx;
48551 +
48552 +extern int gr_init_uidset(void);
48553 +extern void gr_free_uidset(void);
48554 +extern void gr_remove_uid(uid_t uid);
48555 +extern int gr_find_uid(uid_t uid);
48556 +
48557 +DECLARE_BRLOCK(vfsmount_lock);
48558 +
48559 +__inline__ int
48560 +gr_acl_is_enabled(void)
48561 +{
48562 + return (gr_status & GR_READY);
48563 +}
48564 +
48565 +#ifdef CONFIG_BTRFS_FS
48566 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48567 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48568 +#endif
48569 +
48570 +static inline dev_t __get_dev(const struct dentry *dentry)
48571 +{
48572 +#ifdef CONFIG_BTRFS_FS
48573 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48574 + return get_btrfs_dev_from_inode(dentry->d_inode);
48575 + else
48576 +#endif
48577 + return dentry->d_inode->i_sb->s_dev;
48578 +}
48579 +
48580 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48581 +{
48582 + return __get_dev(dentry);
48583 +}
48584 +
48585 +static char gr_task_roletype_to_char(struct task_struct *task)
48586 +{
48587 + switch (task->role->roletype &
48588 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48589 + GR_ROLE_SPECIAL)) {
48590 + case GR_ROLE_DEFAULT:
48591 + return 'D';
48592 + case GR_ROLE_USER:
48593 + return 'U';
48594 + case GR_ROLE_GROUP:
48595 + return 'G';
48596 + case GR_ROLE_SPECIAL:
48597 + return 'S';
48598 + }
48599 +
48600 + return 'X';
48601 +}
48602 +
48603 +char gr_roletype_to_char(void)
48604 +{
48605 + return gr_task_roletype_to_char(current);
48606 +}
48607 +
48608 +__inline__ int
48609 +gr_acl_tpe_check(void)
48610 +{
48611 + if (unlikely(!(gr_status & GR_READY)))
48612 + return 0;
48613 + if (current->role->roletype & GR_ROLE_TPE)
48614 + return 1;
48615 + else
48616 + return 0;
48617 +}
48618 +
48619 +int
48620 +gr_handle_rawio(const struct inode *inode)
48621 +{
48622 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48623 + if (inode && S_ISBLK(inode->i_mode) &&
48624 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48625 + !capable(CAP_SYS_RAWIO))
48626 + return 1;
48627 +#endif
48628 + return 0;
48629 +}
48630 +
48631 +static int
48632 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48633 +{
48634 + if (likely(lena != lenb))
48635 + return 0;
48636 +
48637 + return !memcmp(a, b, lena);
48638 +}
48639 +
48640 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48641 +{
48642 + *buflen -= namelen;
48643 + if (*buflen < 0)
48644 + return -ENAMETOOLONG;
48645 + *buffer -= namelen;
48646 + memcpy(*buffer, str, namelen);
48647 + return 0;
48648 +}
48649 +
48650 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48651 +{
48652 + return prepend(buffer, buflen, name->name, name->len);
48653 +}
48654 +
48655 +static int prepend_path(const struct path *path, struct path *root,
48656 + char **buffer, int *buflen)
48657 +{
48658 + struct dentry *dentry = path->dentry;
48659 + struct vfsmount *vfsmnt = path->mnt;
48660 + bool slash = false;
48661 + int error = 0;
48662 +
48663 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48664 + struct dentry * parent;
48665 +
48666 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48667 + /* Global root? */
48668 + if (vfsmnt->mnt_parent == vfsmnt) {
48669 + goto out;
48670 + }
48671 + dentry = vfsmnt->mnt_mountpoint;
48672 + vfsmnt = vfsmnt->mnt_parent;
48673 + continue;
48674 + }
48675 + parent = dentry->d_parent;
48676 + prefetch(parent);
48677 + spin_lock(&dentry->d_lock);
48678 + error = prepend_name(buffer, buflen, &dentry->d_name);
48679 + spin_unlock(&dentry->d_lock);
48680 + if (!error)
48681 + error = prepend(buffer, buflen, "/", 1);
48682 + if (error)
48683 + break;
48684 +
48685 + slash = true;
48686 + dentry = parent;
48687 + }
48688 +
48689 +out:
48690 + if (!error && !slash)
48691 + error = prepend(buffer, buflen, "/", 1);
48692 +
48693 + return error;
48694 +}
48695 +
48696 +/* this must be called with vfsmount_lock and rename_lock held */
48697 +
48698 +static char *__our_d_path(const struct path *path, struct path *root,
48699 + char *buf, int buflen)
48700 +{
48701 + char *res = buf + buflen;
48702 + int error;
48703 +
48704 + prepend(&res, &buflen, "\0", 1);
48705 + error = prepend_path(path, root, &res, &buflen);
48706 + if (error)
48707 + return ERR_PTR(error);
48708 +
48709 + return res;
48710 +}
48711 +
48712 +static char *
48713 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48714 +{
48715 + char *retval;
48716 +
48717 + retval = __our_d_path(path, root, buf, buflen);
48718 + if (unlikely(IS_ERR(retval)))
48719 + retval = strcpy(buf, "<path too long>");
48720 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48721 + retval[1] = '\0';
48722 +
48723 + return retval;
48724 +}
48725 +
48726 +static char *
48727 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48728 + char *buf, int buflen)
48729 +{
48730 + struct path path;
48731 + char *res;
48732 +
48733 + path.dentry = (struct dentry *)dentry;
48734 + path.mnt = (struct vfsmount *)vfsmnt;
48735 +
48736 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48737 + by the RBAC system */
48738 + res = gen_full_path(&path, &real_root, buf, buflen);
48739 +
48740 + return res;
48741 +}
48742 +
48743 +static char *
48744 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48745 + char *buf, int buflen)
48746 +{
48747 + char *res;
48748 + struct path path;
48749 + struct path root;
48750 + struct task_struct *reaper = &init_task;
48751 +
48752 + path.dentry = (struct dentry *)dentry;
48753 + path.mnt = (struct vfsmount *)vfsmnt;
48754 +
48755 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48756 + get_fs_root(reaper->fs, &root);
48757 +
48758 + write_seqlock(&rename_lock);
48759 + br_read_lock(vfsmount_lock);
48760 + res = gen_full_path(&path, &root, buf, buflen);
48761 + br_read_unlock(vfsmount_lock);
48762 + write_sequnlock(&rename_lock);
48763 +
48764 + path_put(&root);
48765 + return res;
48766 +}
48767 +
48768 +static char *
48769 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48770 +{
48771 + char *ret;
48772 + write_seqlock(&rename_lock);
48773 + br_read_lock(vfsmount_lock);
48774 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48775 + PAGE_SIZE);
48776 + br_read_unlock(vfsmount_lock);
48777 + write_sequnlock(&rename_lock);
48778 + return ret;
48779 +}
48780 +
48781 +static char *
48782 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48783 +{
48784 + char *ret;
48785 + char *buf;
48786 + int buflen;
48787 +
48788 + write_seqlock(&rename_lock);
48789 + br_read_lock(vfsmount_lock);
48790 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48791 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48792 + buflen = (int)(ret - buf);
48793 + if (buflen >= 5)
48794 + prepend(&ret, &buflen, "/proc", 5);
48795 + else
48796 + ret = strcpy(buf, "<path too long>");
48797 + br_read_unlock(vfsmount_lock);
48798 + write_sequnlock(&rename_lock);
48799 + return ret;
48800 +}
48801 +
48802 +char *
48803 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48804 +{
48805 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48806 + PAGE_SIZE);
48807 +}
48808 +
48809 +char *
48810 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48811 +{
48812 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48813 + PAGE_SIZE);
48814 +}
48815 +
48816 +char *
48817 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48818 +{
48819 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48820 + PAGE_SIZE);
48821 +}
48822 +
48823 +char *
48824 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48825 +{
48826 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48827 + PAGE_SIZE);
48828 +}
48829 +
48830 +char *
48831 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48832 +{
48833 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48834 + PAGE_SIZE);
48835 +}
48836 +
48837 +__inline__ __u32
48838 +to_gr_audit(const __u32 reqmode)
48839 +{
48840 + /* masks off auditable permission flags, then shifts them to create
48841 + auditing flags, and adds the special case of append auditing if
48842 + we're requesting write */
48843 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48844 +}
48845 +
48846 +struct acl_subject_label *
48847 +lookup_subject_map(const struct acl_subject_label *userp)
48848 +{
48849 + unsigned int index = shash(userp, subj_map_set.s_size);
48850 + struct subject_map *match;
48851 +
48852 + match = subj_map_set.s_hash[index];
48853 +
48854 + while (match && match->user != userp)
48855 + match = match->next;
48856 +
48857 + if (match != NULL)
48858 + return match->kernel;
48859 + else
48860 + return NULL;
48861 +}
48862 +
48863 +static void
48864 +insert_subj_map_entry(struct subject_map *subjmap)
48865 +{
48866 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48867 + struct subject_map **curr;
48868 +
48869 + subjmap->prev = NULL;
48870 +
48871 + curr = &subj_map_set.s_hash[index];
48872 + if (*curr != NULL)
48873 + (*curr)->prev = subjmap;
48874 +
48875 + subjmap->next = *curr;
48876 + *curr = subjmap;
48877 +
48878 + return;
48879 +}
48880 +
48881 +static struct acl_role_label *
48882 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48883 + const gid_t gid)
48884 +{
48885 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48886 + struct acl_role_label *match;
48887 + struct role_allowed_ip *ipp;
48888 + unsigned int x;
48889 + u32 curr_ip = task->signal->curr_ip;
48890 +
48891 + task->signal->saved_ip = curr_ip;
48892 +
48893 + match = acl_role_set.r_hash[index];
48894 +
48895 + while (match) {
48896 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48897 + for (x = 0; x < match->domain_child_num; x++) {
48898 + if (match->domain_children[x] == uid)
48899 + goto found;
48900 + }
48901 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48902 + break;
48903 + match = match->next;
48904 + }
48905 +found:
48906 + if (match == NULL) {
48907 + try_group:
48908 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48909 + match = acl_role_set.r_hash[index];
48910 +
48911 + while (match) {
48912 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48913 + for (x = 0; x < match->domain_child_num; x++) {
48914 + if (match->domain_children[x] == gid)
48915 + goto found2;
48916 + }
48917 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48918 + break;
48919 + match = match->next;
48920 + }
48921 +found2:
48922 + if (match == NULL)
48923 + match = default_role;
48924 + if (match->allowed_ips == NULL)
48925 + return match;
48926 + else {
48927 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48928 + if (likely
48929 + ((ntohl(curr_ip) & ipp->netmask) ==
48930 + (ntohl(ipp->addr) & ipp->netmask)))
48931 + return match;
48932 + }
48933 + match = default_role;
48934 + }
48935 + } else if (match->allowed_ips == NULL) {
48936 + return match;
48937 + } else {
48938 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48939 + if (likely
48940 + ((ntohl(curr_ip) & ipp->netmask) ==
48941 + (ntohl(ipp->addr) & ipp->netmask)))
48942 + return match;
48943 + }
48944 + goto try_group;
48945 + }
48946 +
48947 + return match;
48948 +}
48949 +
48950 +struct acl_subject_label *
48951 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48952 + const struct acl_role_label *role)
48953 +{
48954 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48955 + struct acl_subject_label *match;
48956 +
48957 + match = role->subj_hash[index];
48958 +
48959 + while (match && (match->inode != ino || match->device != dev ||
48960 + (match->mode & GR_DELETED))) {
48961 + match = match->next;
48962 + }
48963 +
48964 + if (match && !(match->mode & GR_DELETED))
48965 + return match;
48966 + else
48967 + return NULL;
48968 +}
48969 +
48970 +struct acl_subject_label *
48971 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48972 + const struct acl_role_label *role)
48973 +{
48974 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48975 + struct acl_subject_label *match;
48976 +
48977 + match = role->subj_hash[index];
48978 +
48979 + while (match && (match->inode != ino || match->device != dev ||
48980 + !(match->mode & GR_DELETED))) {
48981 + match = match->next;
48982 + }
48983 +
48984 + if (match && (match->mode & GR_DELETED))
48985 + return match;
48986 + else
48987 + return NULL;
48988 +}
48989 +
48990 +static struct acl_object_label *
48991 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48992 + const struct acl_subject_label *subj)
48993 +{
48994 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48995 + struct acl_object_label *match;
48996 +
48997 + match = subj->obj_hash[index];
48998 +
48999 + while (match && (match->inode != ino || match->device != dev ||
49000 + (match->mode & GR_DELETED))) {
49001 + match = match->next;
49002 + }
49003 +
49004 + if (match && !(match->mode & GR_DELETED))
49005 + return match;
49006 + else
49007 + return NULL;
49008 +}
49009 +
49010 +static struct acl_object_label *
49011 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
49012 + const struct acl_subject_label *subj)
49013 +{
49014 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49015 + struct acl_object_label *match;
49016 +
49017 + match = subj->obj_hash[index];
49018 +
49019 + while (match && (match->inode != ino || match->device != dev ||
49020 + !(match->mode & GR_DELETED))) {
49021 + match = match->next;
49022 + }
49023 +
49024 + if (match && (match->mode & GR_DELETED))
49025 + return match;
49026 +
49027 + match = subj->obj_hash[index];
49028 +
49029 + while (match && (match->inode != ino || match->device != dev ||
49030 + (match->mode & GR_DELETED))) {
49031 + match = match->next;
49032 + }
49033 +
49034 + if (match && !(match->mode & GR_DELETED))
49035 + return match;
49036 + else
49037 + return NULL;
49038 +}
49039 +
49040 +static struct name_entry *
49041 +lookup_name_entry(const char *name)
49042 +{
49043 + unsigned int len = strlen(name);
49044 + unsigned int key = full_name_hash(name, len);
49045 + unsigned int index = key % name_set.n_size;
49046 + struct name_entry *match;
49047 +
49048 + match = name_set.n_hash[index];
49049 +
49050 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
49051 + match = match->next;
49052 +
49053 + return match;
49054 +}
49055 +
49056 +static struct name_entry *
49057 +lookup_name_entry_create(const char *name)
49058 +{
49059 + unsigned int len = strlen(name);
49060 + unsigned int key = full_name_hash(name, len);
49061 + unsigned int index = key % name_set.n_size;
49062 + struct name_entry *match;
49063 +
49064 + match = name_set.n_hash[index];
49065 +
49066 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49067 + !match->deleted))
49068 + match = match->next;
49069 +
49070 + if (match && match->deleted)
49071 + return match;
49072 +
49073 + match = name_set.n_hash[index];
49074 +
49075 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49076 + match->deleted))
49077 + match = match->next;
49078 +
49079 + if (match && !match->deleted)
49080 + return match;
49081 + else
49082 + return NULL;
49083 +}
49084 +
49085 +static struct inodev_entry *
49086 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
49087 +{
49088 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
49089 + struct inodev_entry *match;
49090 +
49091 + match = inodev_set.i_hash[index];
49092 +
49093 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
49094 + match = match->next;
49095 +
49096 + return match;
49097 +}
49098 +
49099 +static void
49100 +insert_inodev_entry(struct inodev_entry *entry)
49101 +{
49102 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
49103 + inodev_set.i_size);
49104 + struct inodev_entry **curr;
49105 +
49106 + entry->prev = NULL;
49107 +
49108 + curr = &inodev_set.i_hash[index];
49109 + if (*curr != NULL)
49110 + (*curr)->prev = entry;
49111 +
49112 + entry->next = *curr;
49113 + *curr = entry;
49114 +
49115 + return;
49116 +}
49117 +
49118 +static void
49119 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
49120 +{
49121 + unsigned int index =
49122 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
49123 + struct acl_role_label **curr;
49124 + struct acl_role_label *tmp;
49125 +
49126 + curr = &acl_role_set.r_hash[index];
49127 +
49128 + /* if role was already inserted due to domains and already has
49129 + a role in the same bucket as it attached, then we need to
49130 + combine these two buckets
49131 + */
49132 + if (role->next) {
49133 + tmp = role->next;
49134 + while (tmp->next)
49135 + tmp = tmp->next;
49136 + tmp->next = *curr;
49137 + } else
49138 + role->next = *curr;
49139 + *curr = role;
49140 +
49141 + return;
49142 +}
49143 +
49144 +static void
49145 +insert_acl_role_label(struct acl_role_label *role)
49146 +{
49147 + int i;
49148 +
49149 + if (role_list == NULL) {
49150 + role_list = role;
49151 + role->prev = NULL;
49152 + } else {
49153 + role->prev = role_list;
49154 + role_list = role;
49155 + }
49156 +
49157 + /* used for hash chains */
49158 + role->next = NULL;
49159 +
49160 + if (role->roletype & GR_ROLE_DOMAIN) {
49161 + for (i = 0; i < role->domain_child_num; i++)
49162 + __insert_acl_role_label(role, role->domain_children[i]);
49163 + } else
49164 + __insert_acl_role_label(role, role->uidgid);
49165 +}
49166 +
49167 +static int
49168 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
49169 +{
49170 + struct name_entry **curr, *nentry;
49171 + struct inodev_entry *ientry;
49172 + unsigned int len = strlen(name);
49173 + unsigned int key = full_name_hash(name, len);
49174 + unsigned int index = key % name_set.n_size;
49175 +
49176 + curr = &name_set.n_hash[index];
49177 +
49178 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49179 + curr = &((*curr)->next);
49180 +
49181 + if (*curr != NULL)
49182 + return 1;
49183 +
49184 + nentry = acl_alloc(sizeof (struct name_entry));
49185 + if (nentry == NULL)
49186 + return 0;
49187 + ientry = acl_alloc(sizeof (struct inodev_entry));
49188 + if (ientry == NULL)
49189 + return 0;
49190 + ientry->nentry = nentry;
49191 +
49192 + nentry->key = key;
49193 + nentry->name = name;
49194 + nentry->inode = inode;
49195 + nentry->device = device;
49196 + nentry->len = len;
49197 + nentry->deleted = deleted;
49198 +
49199 + nentry->prev = NULL;
49200 + curr = &name_set.n_hash[index];
49201 + if (*curr != NULL)
49202 + (*curr)->prev = nentry;
49203 + nentry->next = *curr;
49204 + *curr = nentry;
49205 +
49206 + /* insert us into the table searchable by inode/dev */
49207 + insert_inodev_entry(ientry);
49208 +
49209 + return 1;
49210 +}
49211 +
49212 +static void
49213 +insert_acl_obj_label(struct acl_object_label *obj,
49214 + struct acl_subject_label *subj)
49215 +{
49216 + unsigned int index =
49217 + fhash(obj->inode, obj->device, subj->obj_hash_size);
49218 + struct acl_object_label **curr;
49219 +
49220 +
49221 + obj->prev = NULL;
49222 +
49223 + curr = &subj->obj_hash[index];
49224 + if (*curr != NULL)
49225 + (*curr)->prev = obj;
49226 +
49227 + obj->next = *curr;
49228 + *curr = obj;
49229 +
49230 + return;
49231 +}
49232 +
49233 +static void
49234 +insert_acl_subj_label(struct acl_subject_label *obj,
49235 + struct acl_role_label *role)
49236 +{
49237 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49238 + struct acl_subject_label **curr;
49239 +
49240 + obj->prev = NULL;
49241 +
49242 + curr = &role->subj_hash[index];
49243 + if (*curr != NULL)
49244 + (*curr)->prev = obj;
49245 +
49246 + obj->next = *curr;
49247 + *curr = obj;
49248 +
49249 + return;
49250 +}
49251 +
49252 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49253 +
49254 +static void *
49255 +create_table(__u32 * len, int elementsize)
49256 +{
49257 + unsigned int table_sizes[] = {
49258 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49259 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49260 + 4194301, 8388593, 16777213, 33554393, 67108859
49261 + };
49262 + void *newtable = NULL;
49263 + unsigned int pwr = 0;
49264 +
49265 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49266 + table_sizes[pwr] <= *len)
49267 + pwr++;
49268 +
49269 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49270 + return newtable;
49271 +
49272 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49273 + newtable =
49274 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49275 + else
49276 + newtable = vmalloc(table_sizes[pwr] * elementsize);
49277 +
49278 + *len = table_sizes[pwr];
49279 +
49280 + return newtable;
49281 +}
49282 +
49283 +static int
49284 +init_variables(const struct gr_arg *arg)
49285 +{
49286 + struct task_struct *reaper = &init_task;
49287 + unsigned int stacksize;
49288 +
49289 + subj_map_set.s_size = arg->role_db.num_subjects;
49290 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49291 + name_set.n_size = arg->role_db.num_objects;
49292 + inodev_set.i_size = arg->role_db.num_objects;
49293 +
49294 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
49295 + !name_set.n_size || !inodev_set.i_size)
49296 + return 1;
49297 +
49298 + if (!gr_init_uidset())
49299 + return 1;
49300 +
49301 + /* set up the stack that holds allocation info */
49302 +
49303 + stacksize = arg->role_db.num_pointers + 5;
49304 +
49305 + if (!acl_alloc_stack_init(stacksize))
49306 + return 1;
49307 +
49308 + /* grab reference for the real root dentry and vfsmount */
49309 + get_fs_root(reaper->fs, &real_root);
49310 +
49311 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49312 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49313 +#endif
49314 +
49315 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49316 + if (fakefs_obj_rw == NULL)
49317 + return 1;
49318 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49319 +
49320 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49321 + if (fakefs_obj_rwx == NULL)
49322 + return 1;
49323 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49324 +
49325 + subj_map_set.s_hash =
49326 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49327 + acl_role_set.r_hash =
49328 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49329 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49330 + inodev_set.i_hash =
49331 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49332 +
49333 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49334 + !name_set.n_hash || !inodev_set.i_hash)
49335 + return 1;
49336 +
49337 + memset(subj_map_set.s_hash, 0,
49338 + sizeof(struct subject_map *) * subj_map_set.s_size);
49339 + memset(acl_role_set.r_hash, 0,
49340 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
49341 + memset(name_set.n_hash, 0,
49342 + sizeof (struct name_entry *) * name_set.n_size);
49343 + memset(inodev_set.i_hash, 0,
49344 + sizeof (struct inodev_entry *) * inodev_set.i_size);
49345 +
49346 + return 0;
49347 +}
49348 +
49349 +/* free information not needed after startup
49350 + currently contains user->kernel pointer mappings for subjects
49351 +*/
49352 +
49353 +static void
49354 +free_init_variables(void)
49355 +{
49356 + __u32 i;
49357 +
49358 + if (subj_map_set.s_hash) {
49359 + for (i = 0; i < subj_map_set.s_size; i++) {
49360 + if (subj_map_set.s_hash[i]) {
49361 + kfree(subj_map_set.s_hash[i]);
49362 + subj_map_set.s_hash[i] = NULL;
49363 + }
49364 + }
49365 +
49366 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49367 + PAGE_SIZE)
49368 + kfree(subj_map_set.s_hash);
49369 + else
49370 + vfree(subj_map_set.s_hash);
49371 + }
49372 +
49373 + return;
49374 +}
49375 +
49376 +static void
49377 +free_variables(void)
49378 +{
49379 + struct acl_subject_label *s;
49380 + struct acl_role_label *r;
49381 + struct task_struct *task, *task2;
49382 + unsigned int x;
49383 +
49384 + gr_clear_learn_entries();
49385 +
49386 + read_lock(&tasklist_lock);
49387 + do_each_thread(task2, task) {
49388 + task->acl_sp_role = 0;
49389 + task->acl_role_id = 0;
49390 + task->acl = NULL;
49391 + task->role = NULL;
49392 + } while_each_thread(task2, task);
49393 + read_unlock(&tasklist_lock);
49394 +
49395 + /* release the reference to the real root dentry and vfsmount */
49396 + path_put(&real_root);
49397 +
49398 + /* free all object hash tables */
49399 +
49400 + FOR_EACH_ROLE_START(r)
49401 + if (r->subj_hash == NULL)
49402 + goto next_role;
49403 + FOR_EACH_SUBJECT_START(r, s, x)
49404 + if (s->obj_hash == NULL)
49405 + break;
49406 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49407 + kfree(s->obj_hash);
49408 + else
49409 + vfree(s->obj_hash);
49410 + FOR_EACH_SUBJECT_END(s, x)
49411 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49412 + if (s->obj_hash == NULL)
49413 + break;
49414 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49415 + kfree(s->obj_hash);
49416 + else
49417 + vfree(s->obj_hash);
49418 + FOR_EACH_NESTED_SUBJECT_END(s)
49419 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49420 + kfree(r->subj_hash);
49421 + else
49422 + vfree(r->subj_hash);
49423 + r->subj_hash = NULL;
49424 +next_role:
49425 + FOR_EACH_ROLE_END(r)
49426 +
49427 + acl_free_all();
49428 +
49429 + if (acl_role_set.r_hash) {
49430 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49431 + PAGE_SIZE)
49432 + kfree(acl_role_set.r_hash);
49433 + else
49434 + vfree(acl_role_set.r_hash);
49435 + }
49436 + if (name_set.n_hash) {
49437 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49438 + PAGE_SIZE)
49439 + kfree(name_set.n_hash);
49440 + else
49441 + vfree(name_set.n_hash);
49442 + }
49443 +
49444 + if (inodev_set.i_hash) {
49445 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49446 + PAGE_SIZE)
49447 + kfree(inodev_set.i_hash);
49448 + else
49449 + vfree(inodev_set.i_hash);
49450 + }
49451 +
49452 + gr_free_uidset();
49453 +
49454 + memset(&name_set, 0, sizeof (struct name_db));
49455 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49456 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49457 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49458 +
49459 + default_role = NULL;
49460 + role_list = NULL;
49461 +
49462 + return;
49463 +}
49464 +
49465 +static __u32
49466 +count_user_objs(struct acl_object_label *userp)
49467 +{
49468 + struct acl_object_label o_tmp;
49469 + __u32 num = 0;
49470 +
49471 + while (userp) {
49472 + if (copy_from_user(&o_tmp, userp,
49473 + sizeof (struct acl_object_label)))
49474 + break;
49475 +
49476 + userp = o_tmp.prev;
49477 + num++;
49478 + }
49479 +
49480 + return num;
49481 +}
49482 +
49483 +static struct acl_subject_label *
49484 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49485 +
49486 +static int
49487 +copy_user_glob(struct acl_object_label *obj)
49488 +{
49489 + struct acl_object_label *g_tmp, **guser;
49490 + unsigned int len;
49491 + char *tmp;
49492 +
49493 + if (obj->globbed == NULL)
49494 + return 0;
49495 +
49496 + guser = &obj->globbed;
49497 + while (*guser) {
49498 + g_tmp = (struct acl_object_label *)
49499 + acl_alloc(sizeof (struct acl_object_label));
49500 + if (g_tmp == NULL)
49501 + return -ENOMEM;
49502 +
49503 + if (copy_from_user(g_tmp, *guser,
49504 + sizeof (struct acl_object_label)))
49505 + return -EFAULT;
49506 +
49507 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49508 +
49509 + if (!len || len >= PATH_MAX)
49510 + return -EINVAL;
49511 +
49512 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49513 + return -ENOMEM;
49514 +
49515 + if (copy_from_user(tmp, g_tmp->filename, len))
49516 + return -EFAULT;
49517 + tmp[len-1] = '\0';
49518 + g_tmp->filename = tmp;
49519 +
49520 + *guser = g_tmp;
49521 + guser = &(g_tmp->next);
49522 + }
49523 +
49524 + return 0;
49525 +}
49526 +
49527 +static int
49528 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49529 + struct acl_role_label *role)
49530 +{
49531 + struct acl_object_label *o_tmp;
49532 + unsigned int len;
49533 + int ret;
49534 + char *tmp;
49535 +
49536 + while (userp) {
49537 + if ((o_tmp = (struct acl_object_label *)
49538 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49539 + return -ENOMEM;
49540 +
49541 + if (copy_from_user(o_tmp, userp,
49542 + sizeof (struct acl_object_label)))
49543 + return -EFAULT;
49544 +
49545 + userp = o_tmp->prev;
49546 +
49547 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49548 +
49549 + if (!len || len >= PATH_MAX)
49550 + return -EINVAL;
49551 +
49552 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49553 + return -ENOMEM;
49554 +
49555 + if (copy_from_user(tmp, o_tmp->filename, len))
49556 + return -EFAULT;
49557 + tmp[len-1] = '\0';
49558 + o_tmp->filename = tmp;
49559 +
49560 + insert_acl_obj_label(o_tmp, subj);
49561 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49562 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49563 + return -ENOMEM;
49564 +
49565 + ret = copy_user_glob(o_tmp);
49566 + if (ret)
49567 + return ret;
49568 +
49569 + if (o_tmp->nested) {
49570 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49571 + if (IS_ERR(o_tmp->nested))
49572 + return PTR_ERR(o_tmp->nested);
49573 +
49574 + /* insert into nested subject list */
49575 + o_tmp->nested->next = role->hash->first;
49576 + role->hash->first = o_tmp->nested;
49577 + }
49578 + }
49579 +
49580 + return 0;
49581 +}
49582 +
49583 +static __u32
49584 +count_user_subjs(struct acl_subject_label *userp)
49585 +{
49586 + struct acl_subject_label s_tmp;
49587 + __u32 num = 0;
49588 +
49589 + while (userp) {
49590 + if (copy_from_user(&s_tmp, userp,
49591 + sizeof (struct acl_subject_label)))
49592 + break;
49593 +
49594 + userp = s_tmp.prev;
49595 + /* do not count nested subjects against this count, since
49596 + they are not included in the hash table, but are
49597 + attached to objects. We have already counted
49598 + the subjects in userspace for the allocation
49599 + stack
49600 + */
49601 + if (!(s_tmp.mode & GR_NESTED))
49602 + num++;
49603 + }
49604 +
49605 + return num;
49606 +}
49607 +
49608 +static int
49609 +copy_user_allowedips(struct acl_role_label *rolep)
49610 +{
49611 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49612 +
49613 + ruserip = rolep->allowed_ips;
49614 +
49615 + while (ruserip) {
49616 + rlast = rtmp;
49617 +
49618 + if ((rtmp = (struct role_allowed_ip *)
49619 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49620 + return -ENOMEM;
49621 +
49622 + if (copy_from_user(rtmp, ruserip,
49623 + sizeof (struct role_allowed_ip)))
49624 + return -EFAULT;
49625 +
49626 + ruserip = rtmp->prev;
49627 +
49628 + if (!rlast) {
49629 + rtmp->prev = NULL;
49630 + rolep->allowed_ips = rtmp;
49631 + } else {
49632 + rlast->next = rtmp;
49633 + rtmp->prev = rlast;
49634 + }
49635 +
49636 + if (!ruserip)
49637 + rtmp->next = NULL;
49638 + }
49639 +
49640 + return 0;
49641 +}
49642 +
49643 +static int
49644 +copy_user_transitions(struct acl_role_label *rolep)
49645 +{
49646 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49647 +
49648 + unsigned int len;
49649 + char *tmp;
49650 +
49651 + rusertp = rolep->transitions;
49652 +
49653 + while (rusertp) {
49654 + rlast = rtmp;
49655 +
49656 + if ((rtmp = (struct role_transition *)
49657 + acl_alloc(sizeof (struct role_transition))) == NULL)
49658 + return -ENOMEM;
49659 +
49660 + if (copy_from_user(rtmp, rusertp,
49661 + sizeof (struct role_transition)))
49662 + return -EFAULT;
49663 +
49664 + rusertp = rtmp->prev;
49665 +
49666 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49667 +
49668 + if (!len || len >= GR_SPROLE_LEN)
49669 + return -EINVAL;
49670 +
49671 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49672 + return -ENOMEM;
49673 +
49674 + if (copy_from_user(tmp, rtmp->rolename, len))
49675 + return -EFAULT;
49676 + tmp[len-1] = '\0';
49677 + rtmp->rolename = tmp;
49678 +
49679 + if (!rlast) {
49680 + rtmp->prev = NULL;
49681 + rolep->transitions = rtmp;
49682 + } else {
49683 + rlast->next = rtmp;
49684 + rtmp->prev = rlast;
49685 + }
49686 +
49687 + if (!rusertp)
49688 + rtmp->next = NULL;
49689 + }
49690 +
49691 + return 0;
49692 +}
49693 +
49694 +static struct acl_subject_label *
49695 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49696 +{
49697 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49698 + unsigned int len;
49699 + char *tmp;
49700 + __u32 num_objs;
49701 + struct acl_ip_label **i_tmp, *i_utmp2;
49702 + struct gr_hash_struct ghash;
49703 + struct subject_map *subjmap;
49704 + unsigned int i_num;
49705 + int err;
49706 +
49707 + s_tmp = lookup_subject_map(userp);
49708 +
49709 + /* we've already copied this subject into the kernel, just return
49710 + the reference to it, and don't copy it over again
49711 + */
49712 + if (s_tmp)
49713 + return(s_tmp);
49714 +
49715 + if ((s_tmp = (struct acl_subject_label *)
49716 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49717 + return ERR_PTR(-ENOMEM);
49718 +
49719 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49720 + if (subjmap == NULL)
49721 + return ERR_PTR(-ENOMEM);
49722 +
49723 + subjmap->user = userp;
49724 + subjmap->kernel = s_tmp;
49725 + insert_subj_map_entry(subjmap);
49726 +
49727 + if (copy_from_user(s_tmp, userp,
49728 + sizeof (struct acl_subject_label)))
49729 + return ERR_PTR(-EFAULT);
49730 +
49731 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49732 +
49733 + if (!len || len >= PATH_MAX)
49734 + return ERR_PTR(-EINVAL);
49735 +
49736 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49737 + return ERR_PTR(-ENOMEM);
49738 +
49739 + if (copy_from_user(tmp, s_tmp->filename, len))
49740 + return ERR_PTR(-EFAULT);
49741 + tmp[len-1] = '\0';
49742 + s_tmp->filename = tmp;
49743 +
49744 + if (!strcmp(s_tmp->filename, "/"))
49745 + role->root_label = s_tmp;
49746 +
49747 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49748 + return ERR_PTR(-EFAULT);
49749 +
49750 + /* copy user and group transition tables */
49751 +
49752 + if (s_tmp->user_trans_num) {
49753 + uid_t *uidlist;
49754 +
49755 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49756 + if (uidlist == NULL)
49757 + return ERR_PTR(-ENOMEM);
49758 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49759 + return ERR_PTR(-EFAULT);
49760 +
49761 + s_tmp->user_transitions = uidlist;
49762 + }
49763 +
49764 + if (s_tmp->group_trans_num) {
49765 + gid_t *gidlist;
49766 +
49767 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49768 + if (gidlist == NULL)
49769 + return ERR_PTR(-ENOMEM);
49770 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49771 + return ERR_PTR(-EFAULT);
49772 +
49773 + s_tmp->group_transitions = gidlist;
49774 + }
49775 +
49776 + /* set up object hash table */
49777 + num_objs = count_user_objs(ghash.first);
49778 +
49779 + s_tmp->obj_hash_size = num_objs;
49780 + s_tmp->obj_hash =
49781 + (struct acl_object_label **)
49782 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49783 +
49784 + if (!s_tmp->obj_hash)
49785 + return ERR_PTR(-ENOMEM);
49786 +
49787 + memset(s_tmp->obj_hash, 0,
49788 + s_tmp->obj_hash_size *
49789 + sizeof (struct acl_object_label *));
49790 +
49791 + /* add in objects */
49792 + err = copy_user_objs(ghash.first, s_tmp, role);
49793 +
49794 + if (err)
49795 + return ERR_PTR(err);
49796 +
49797 + /* set pointer for parent subject */
49798 + if (s_tmp->parent_subject) {
49799 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49800 +
49801 + if (IS_ERR(s_tmp2))
49802 + return s_tmp2;
49803 +
49804 + s_tmp->parent_subject = s_tmp2;
49805 + }
49806 +
49807 + /* add in ip acls */
49808 +
49809 + if (!s_tmp->ip_num) {
49810 + s_tmp->ips = NULL;
49811 + goto insert;
49812 + }
49813 +
49814 + i_tmp =
49815 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49816 + sizeof (struct acl_ip_label *));
49817 +
49818 + if (!i_tmp)
49819 + return ERR_PTR(-ENOMEM);
49820 +
49821 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49822 + *(i_tmp + i_num) =
49823 + (struct acl_ip_label *)
49824 + acl_alloc(sizeof (struct acl_ip_label));
49825 + if (!*(i_tmp + i_num))
49826 + return ERR_PTR(-ENOMEM);
49827 +
49828 + if (copy_from_user
49829 + (&i_utmp2, s_tmp->ips + i_num,
49830 + sizeof (struct acl_ip_label *)))
49831 + return ERR_PTR(-EFAULT);
49832 +
49833 + if (copy_from_user
49834 + (*(i_tmp + i_num), i_utmp2,
49835 + sizeof (struct acl_ip_label)))
49836 + return ERR_PTR(-EFAULT);
49837 +
49838 + if ((*(i_tmp + i_num))->iface == NULL)
49839 + continue;
49840 +
49841 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49842 + if (!len || len >= IFNAMSIZ)
49843 + return ERR_PTR(-EINVAL);
49844 + tmp = acl_alloc(len);
49845 + if (tmp == NULL)
49846 + return ERR_PTR(-ENOMEM);
49847 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49848 + return ERR_PTR(-EFAULT);
49849 + (*(i_tmp + i_num))->iface = tmp;
49850 + }
49851 +
49852 + s_tmp->ips = i_tmp;
49853 +
49854 +insert:
49855 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49856 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49857 + return ERR_PTR(-ENOMEM);
49858 +
49859 + return s_tmp;
49860 +}
49861 +
49862 +static int
49863 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49864 +{
49865 + struct acl_subject_label s_pre;
49866 + struct acl_subject_label * ret;
49867 + int err;
49868 +
49869 + while (userp) {
49870 + if (copy_from_user(&s_pre, userp,
49871 + sizeof (struct acl_subject_label)))
49872 + return -EFAULT;
49873 +
49874 + /* do not add nested subjects here, add
49875 + while parsing objects
49876 + */
49877 +
49878 + if (s_pre.mode & GR_NESTED) {
49879 + userp = s_pre.prev;
49880 + continue;
49881 + }
49882 +
49883 + ret = do_copy_user_subj(userp, role);
49884 +
49885 + err = PTR_ERR(ret);
49886 + if (IS_ERR(ret))
49887 + return err;
49888 +
49889 + insert_acl_subj_label(ret, role);
49890 +
49891 + userp = s_pre.prev;
49892 + }
49893 +
49894 + return 0;
49895 +}
49896 +
49897 +static int
49898 +copy_user_acl(struct gr_arg *arg)
49899 +{
49900 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49901 + struct sprole_pw *sptmp;
49902 + struct gr_hash_struct *ghash;
49903 + uid_t *domainlist;
49904 + unsigned int r_num;
49905 + unsigned int len;
49906 + char *tmp;
49907 + int err = 0;
49908 + __u16 i;
49909 + __u32 num_subjs;
49910 +
49911 + /* we need a default and kernel role */
49912 + if (arg->role_db.num_roles < 2)
49913 + return -EINVAL;
49914 +
49915 + /* copy special role authentication info from userspace */
49916 +
49917 + num_sprole_pws = arg->num_sprole_pws;
49918 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49919 +
49920 + if (!acl_special_roles) {
49921 + err = -ENOMEM;
49922 + goto cleanup;
49923 + }
49924 +
49925 + for (i = 0; i < num_sprole_pws; i++) {
49926 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49927 + if (!sptmp) {
49928 + err = -ENOMEM;
49929 + goto cleanup;
49930 + }
49931 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49932 + sizeof (struct sprole_pw))) {
49933 + err = -EFAULT;
49934 + goto cleanup;
49935 + }
49936 +
49937 + len =
49938 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49939 +
49940 + if (!len || len >= GR_SPROLE_LEN) {
49941 + err = -EINVAL;
49942 + goto cleanup;
49943 + }
49944 +
49945 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49946 + err = -ENOMEM;
49947 + goto cleanup;
49948 + }
49949 +
49950 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49951 + err = -EFAULT;
49952 + goto cleanup;
49953 + }
49954 + tmp[len-1] = '\0';
49955 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49956 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49957 +#endif
49958 + sptmp->rolename = tmp;
49959 + acl_special_roles[i] = sptmp;
49960 + }
49961 +
49962 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49963 +
49964 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49965 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49966 +
49967 + if (!r_tmp) {
49968 + err = -ENOMEM;
49969 + goto cleanup;
49970 + }
49971 +
49972 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49973 + sizeof (struct acl_role_label *))) {
49974 + err = -EFAULT;
49975 + goto cleanup;
49976 + }
49977 +
49978 + if (copy_from_user(r_tmp, r_utmp2,
49979 + sizeof (struct acl_role_label))) {
49980 + err = -EFAULT;
49981 + goto cleanup;
49982 + }
49983 +
49984 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49985 +
49986 + if (!len || len >= PATH_MAX) {
49987 + err = -EINVAL;
49988 + goto cleanup;
49989 + }
49990 +
49991 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49992 + err = -ENOMEM;
49993 + goto cleanup;
49994 + }
49995 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
49996 + err = -EFAULT;
49997 + goto cleanup;
49998 + }
49999 + tmp[len-1] = '\0';
50000 + r_tmp->rolename = tmp;
50001 +
50002 + if (!strcmp(r_tmp->rolename, "default")
50003 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
50004 + default_role = r_tmp;
50005 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
50006 + kernel_role = r_tmp;
50007 + }
50008 +
50009 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
50010 + err = -ENOMEM;
50011 + goto cleanup;
50012 + }
50013 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
50014 + err = -EFAULT;
50015 + goto cleanup;
50016 + }
50017 +
50018 + r_tmp->hash = ghash;
50019 +
50020 + num_subjs = count_user_subjs(r_tmp->hash->first);
50021 +
50022 + r_tmp->subj_hash_size = num_subjs;
50023 + r_tmp->subj_hash =
50024 + (struct acl_subject_label **)
50025 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
50026 +
50027 + if (!r_tmp->subj_hash) {
50028 + err = -ENOMEM;
50029 + goto cleanup;
50030 + }
50031 +
50032 + err = copy_user_allowedips(r_tmp);
50033 + if (err)
50034 + goto cleanup;
50035 +
50036 + /* copy domain info */
50037 + if (r_tmp->domain_children != NULL) {
50038 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
50039 + if (domainlist == NULL) {
50040 + err = -ENOMEM;
50041 + goto cleanup;
50042 + }
50043 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
50044 + err = -EFAULT;
50045 + goto cleanup;
50046 + }
50047 + r_tmp->domain_children = domainlist;
50048 + }
50049 +
50050 + err = copy_user_transitions(r_tmp);
50051 + if (err)
50052 + goto cleanup;
50053 +
50054 + memset(r_tmp->subj_hash, 0,
50055 + r_tmp->subj_hash_size *
50056 + sizeof (struct acl_subject_label *));
50057 +
50058 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
50059 +
50060 + if (err)
50061 + goto cleanup;
50062 +
50063 + /* set nested subject list to null */
50064 + r_tmp->hash->first = NULL;
50065 +
50066 + insert_acl_role_label(r_tmp);
50067 + }
50068 +
50069 + goto return_err;
50070 + cleanup:
50071 + free_variables();
50072 + return_err:
50073 + return err;
50074 +
50075 +}
50076 +
50077 +static int
50078 +gracl_init(struct gr_arg *args)
50079 +{
50080 + int error = 0;
50081 +
50082 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
50083 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
50084 +
50085 + if (init_variables(args)) {
50086 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
50087 + error = -ENOMEM;
50088 + free_variables();
50089 + goto out;
50090 + }
50091 +
50092 + error = copy_user_acl(args);
50093 + free_init_variables();
50094 + if (error) {
50095 + free_variables();
50096 + goto out;
50097 + }
50098 +
50099 + if ((error = gr_set_acls(0))) {
50100 + free_variables();
50101 + goto out;
50102 + }
50103 +
50104 + pax_open_kernel();
50105 + gr_status |= GR_READY;
50106 + pax_close_kernel();
50107 +
50108 + out:
50109 + return error;
50110 +}
50111 +
50112 +/* derived from glibc fnmatch() 0: match, 1: no match*/
50113 +
50114 +static int
50115 +glob_match(const char *p, const char *n)
50116 +{
50117 + char c;
50118 +
50119 + while ((c = *p++) != '\0') {
50120 + switch (c) {
50121 + case '?':
50122 + if (*n == '\0')
50123 + return 1;
50124 + else if (*n == '/')
50125 + return 1;
50126 + break;
50127 + case '\\':
50128 + if (*n != c)
50129 + return 1;
50130 + break;
50131 + case '*':
50132 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
50133 + if (*n == '/')
50134 + return 1;
50135 + else if (c == '?') {
50136 + if (*n == '\0')
50137 + return 1;
50138 + else
50139 + ++n;
50140 + }
50141 + }
50142 + if (c == '\0') {
50143 + return 0;
50144 + } else {
50145 + const char *endp;
50146 +
50147 + if ((endp = strchr(n, '/')) == NULL)
50148 + endp = n + strlen(n);
50149 +
50150 + if (c == '[') {
50151 + for (--p; n < endp; ++n)
50152 + if (!glob_match(p, n))
50153 + return 0;
50154 + } else if (c == '/') {
50155 + while (*n != '\0' && *n != '/')
50156 + ++n;
50157 + if (*n == '/' && !glob_match(p, n + 1))
50158 + return 0;
50159 + } else {
50160 + for (--p; n < endp; ++n)
50161 + if (*n == c && !glob_match(p, n))
50162 + return 0;
50163 + }
50164 +
50165 + return 1;
50166 + }
50167 + case '[':
50168 + {
50169 + int not;
50170 + char cold;
50171 +
50172 + if (*n == '\0' || *n == '/')
50173 + return 1;
50174 +
50175 + not = (*p == '!' || *p == '^');
50176 + if (not)
50177 + ++p;
50178 +
50179 + c = *p++;
50180 + for (;;) {
50181 + unsigned char fn = (unsigned char)*n;
50182 +
50183 + if (c == '\0')
50184 + return 1;
50185 + else {
50186 + if (c == fn)
50187 + goto matched;
50188 + cold = c;
50189 + c = *p++;
50190 +
50191 + if (c == '-' && *p != ']') {
50192 + unsigned char cend = *p++;
50193 +
50194 + if (cend == '\0')
50195 + return 1;
50196 +
50197 + if (cold <= fn && fn <= cend)
50198 + goto matched;
50199 +
50200 + c = *p++;
50201 + }
50202 + }
50203 +
50204 + if (c == ']')
50205 + break;
50206 + }
50207 + if (!not)
50208 + return 1;
50209 + break;
50210 + matched:
50211 + while (c != ']') {
50212 + if (c == '\0')
50213 + return 1;
50214 +
50215 + c = *p++;
50216 + }
50217 + if (not)
50218 + return 1;
50219 + }
50220 + break;
50221 + default:
50222 + if (c != *n)
50223 + return 1;
50224 + }
50225 +
50226 + ++n;
50227 + }
50228 +
50229 + if (*n == '\0')
50230 + return 0;
50231 +
50232 + if (*n == '/')
50233 + return 0;
50234 +
50235 + return 1;
50236 +}
50237 +
50238 +static struct acl_object_label *
50239 +chk_glob_label(struct acl_object_label *globbed,
50240 + struct dentry *dentry, struct vfsmount *mnt, char **path)
50241 +{
50242 + struct acl_object_label *tmp;
50243 +
50244 + if (*path == NULL)
50245 + *path = gr_to_filename_nolock(dentry, mnt);
50246 +
50247 + tmp = globbed;
50248 +
50249 + while (tmp) {
50250 + if (!glob_match(tmp->filename, *path))
50251 + return tmp;
50252 + tmp = tmp->next;
50253 + }
50254 +
50255 + return NULL;
50256 +}
50257 +
50258 +static struct acl_object_label *
50259 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50260 + const ino_t curr_ino, const dev_t curr_dev,
50261 + const struct acl_subject_label *subj, char **path, const int checkglob)
50262 +{
50263 + struct acl_subject_label *tmpsubj;
50264 + struct acl_object_label *retval;
50265 + struct acl_object_label *retval2;
50266 +
50267 + tmpsubj = (struct acl_subject_label *) subj;
50268 + read_lock(&gr_inode_lock);
50269 + do {
50270 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50271 + if (retval) {
50272 + if (checkglob && retval->globbed) {
50273 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50274 + (struct vfsmount *)orig_mnt, path);
50275 + if (retval2)
50276 + retval = retval2;
50277 + }
50278 + break;
50279 + }
50280 + } while ((tmpsubj = tmpsubj->parent_subject));
50281 + read_unlock(&gr_inode_lock);
50282 +
50283 + return retval;
50284 +}
50285 +
50286 +static __inline__ struct acl_object_label *
50287 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50288 + struct dentry *curr_dentry,
50289 + const struct acl_subject_label *subj, char **path, const int checkglob)
50290 +{
50291 + int newglob = checkglob;
50292 + ino_t inode;
50293 + dev_t device;
50294 +
50295 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50296 + as we don't want a / * rule to match instead of the / object
50297 + don't do this for create lookups that call this function though, since they're looking up
50298 + on the parent and thus need globbing checks on all paths
50299 + */
50300 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50301 + newglob = GR_NO_GLOB;
50302 +
50303 + spin_lock(&curr_dentry->d_lock);
50304 + inode = curr_dentry->d_inode->i_ino;
50305 + device = __get_dev(curr_dentry);
50306 + spin_unlock(&curr_dentry->d_lock);
50307 +
50308 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50309 +}
50310 +
50311 +static struct acl_object_label *
50312 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50313 + const struct acl_subject_label *subj, char *path, const int checkglob)
50314 +{
50315 + struct dentry *dentry = (struct dentry *) l_dentry;
50316 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50317 + struct acl_object_label *retval;
50318 + struct dentry *parent;
50319 +
50320 + write_seqlock(&rename_lock);
50321 + br_read_lock(vfsmount_lock);
50322 +
50323 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50324 +#ifdef CONFIG_NET
50325 + mnt == sock_mnt ||
50326 +#endif
50327 +#ifdef CONFIG_HUGETLBFS
50328 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50329 +#endif
50330 + /* ignore Eric Biederman */
50331 + IS_PRIVATE(l_dentry->d_inode))) {
50332 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50333 + goto out;
50334 + }
50335 +
50336 + for (;;) {
50337 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50338 + break;
50339 +
50340 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50341 + if (mnt->mnt_parent == mnt)
50342 + break;
50343 +
50344 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50345 + if (retval != NULL)
50346 + goto out;
50347 +
50348 + dentry = mnt->mnt_mountpoint;
50349 + mnt = mnt->mnt_parent;
50350 + continue;
50351 + }
50352 +
50353 + parent = dentry->d_parent;
50354 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50355 + if (retval != NULL)
50356 + goto out;
50357 +
50358 + dentry = parent;
50359 + }
50360 +
50361 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50362 +
50363 + /* real_root is pinned so we don't have to hold a reference */
50364 + if (retval == NULL)
50365 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50366 +out:
50367 + br_read_unlock(vfsmount_lock);
50368 + write_sequnlock(&rename_lock);
50369 +
50370 + BUG_ON(retval == NULL);
50371 +
50372 + return retval;
50373 +}
50374 +
50375 +static __inline__ struct acl_object_label *
50376 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50377 + const struct acl_subject_label *subj)
50378 +{
50379 + char *path = NULL;
50380 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50381 +}
50382 +
50383 +static __inline__ struct acl_object_label *
50384 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50385 + const struct acl_subject_label *subj)
50386 +{
50387 + char *path = NULL;
50388 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50389 +}
50390 +
50391 +static __inline__ struct acl_object_label *
50392 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50393 + const struct acl_subject_label *subj, char *path)
50394 +{
50395 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50396 +}
50397 +
50398 +static struct acl_subject_label *
50399 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50400 + const struct acl_role_label *role)
50401 +{
50402 + struct dentry *dentry = (struct dentry *) l_dentry;
50403 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50404 + struct acl_subject_label *retval;
50405 + struct dentry *parent;
50406 +
50407 + write_seqlock(&rename_lock);
50408 + br_read_lock(vfsmount_lock);
50409 +
50410 + for (;;) {
50411 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50412 + break;
50413 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50414 + if (mnt->mnt_parent == mnt)
50415 + break;
50416 +
50417 + spin_lock(&dentry->d_lock);
50418 + read_lock(&gr_inode_lock);
50419 + retval =
50420 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50421 + __get_dev(dentry), role);
50422 + read_unlock(&gr_inode_lock);
50423 + spin_unlock(&dentry->d_lock);
50424 + if (retval != NULL)
50425 + goto out;
50426 +
50427 + dentry = mnt->mnt_mountpoint;
50428 + mnt = mnt->mnt_parent;
50429 + continue;
50430 + }
50431 +
50432 + spin_lock(&dentry->d_lock);
50433 + read_lock(&gr_inode_lock);
50434 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50435 + __get_dev(dentry), role);
50436 + read_unlock(&gr_inode_lock);
50437 + parent = dentry->d_parent;
50438 + spin_unlock(&dentry->d_lock);
50439 +
50440 + if (retval != NULL)
50441 + goto out;
50442 +
50443 + dentry = parent;
50444 + }
50445 +
50446 + spin_lock(&dentry->d_lock);
50447 + read_lock(&gr_inode_lock);
50448 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50449 + __get_dev(dentry), role);
50450 + read_unlock(&gr_inode_lock);
50451 + spin_unlock(&dentry->d_lock);
50452 +
50453 + if (unlikely(retval == NULL)) {
50454 + /* real_root is pinned, we don't need to hold a reference */
50455 + read_lock(&gr_inode_lock);
50456 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50457 + __get_dev(real_root.dentry), role);
50458 + read_unlock(&gr_inode_lock);
50459 + }
50460 +out:
50461 + br_read_unlock(vfsmount_lock);
50462 + write_sequnlock(&rename_lock);
50463 +
50464 + BUG_ON(retval == NULL);
50465 +
50466 + return retval;
50467 +}
50468 +
50469 +static void
50470 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50471 +{
50472 + struct task_struct *task = current;
50473 + const struct cred *cred = current_cred();
50474 +
50475 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50476 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50477 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50478 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50479 +
50480 + return;
50481 +}
50482 +
50483 +static void
50484 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50485 +{
50486 + struct task_struct *task = current;
50487 + const struct cred *cred = current_cred();
50488 +
50489 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50490 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50491 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50492 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50493 +
50494 + return;
50495 +}
50496 +
50497 +static void
50498 +gr_log_learn_id_change(const char type, const unsigned int real,
50499 + const unsigned int effective, const unsigned int fs)
50500 +{
50501 + struct task_struct *task = current;
50502 + const struct cred *cred = current_cred();
50503 +
50504 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50505 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50506 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50507 + type, real, effective, fs, &task->signal->saved_ip);
50508 +
50509 + return;
50510 +}
50511 +
50512 +__u32
50513 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50514 + const struct vfsmount * mnt)
50515 +{
50516 + __u32 retval = mode;
50517 + struct acl_subject_label *curracl;
50518 + struct acl_object_label *currobj;
50519 +
50520 + if (unlikely(!(gr_status & GR_READY)))
50521 + return (mode & ~GR_AUDITS);
50522 +
50523 + curracl = current->acl;
50524 +
50525 + currobj = chk_obj_label(dentry, mnt, curracl);
50526 + retval = currobj->mode & mode;
50527 +
50528 + /* if we're opening a specified transfer file for writing
50529 + (e.g. /dev/initctl), then transfer our role to init
50530 + */
50531 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50532 + current->role->roletype & GR_ROLE_PERSIST)) {
50533 + struct task_struct *task = init_pid_ns.child_reaper;
50534 +
50535 + if (task->role != current->role) {
50536 + task->acl_sp_role = 0;
50537 + task->acl_role_id = current->acl_role_id;
50538 + task->role = current->role;
50539 + rcu_read_lock();
50540 + read_lock(&grsec_exec_file_lock);
50541 + gr_apply_subject_to_task(task);
50542 + read_unlock(&grsec_exec_file_lock);
50543 + rcu_read_unlock();
50544 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50545 + }
50546 + }
50547 +
50548 + if (unlikely
50549 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50550 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50551 + __u32 new_mode = mode;
50552 +
50553 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50554 +
50555 + retval = new_mode;
50556 +
50557 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50558 + new_mode |= GR_INHERIT;
50559 +
50560 + if (!(mode & GR_NOLEARN))
50561 + gr_log_learn(dentry, mnt, new_mode);
50562 + }
50563 +
50564 + return retval;
50565 +}
50566 +
50567 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50568 + const struct dentry *parent,
50569 + const struct vfsmount *mnt)
50570 +{
50571 + struct name_entry *match;
50572 + struct acl_object_label *matchpo;
50573 + struct acl_subject_label *curracl;
50574 + char *path;
50575 +
50576 + if (unlikely(!(gr_status & GR_READY)))
50577 + return NULL;
50578 +
50579 + preempt_disable();
50580 + path = gr_to_filename_rbac(new_dentry, mnt);
50581 + match = lookup_name_entry_create(path);
50582 +
50583 + curracl = current->acl;
50584 +
50585 + if (match) {
50586 + read_lock(&gr_inode_lock);
50587 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50588 + read_unlock(&gr_inode_lock);
50589 +
50590 + if (matchpo) {
50591 + preempt_enable();
50592 + return matchpo;
50593 + }
50594 + }
50595 +
50596 + // lookup parent
50597 +
50598 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50599 +
50600 + preempt_enable();
50601 + return matchpo;
50602 +}
50603 +
50604 +__u32
50605 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50606 + const struct vfsmount * mnt, const __u32 mode)
50607 +{
50608 + struct acl_object_label *matchpo;
50609 + __u32 retval;
50610 +
50611 + if (unlikely(!(gr_status & GR_READY)))
50612 + return (mode & ~GR_AUDITS);
50613 +
50614 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50615 +
50616 + retval = matchpo->mode & mode;
50617 +
50618 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50619 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50620 + __u32 new_mode = mode;
50621 +
50622 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50623 +
50624 + gr_log_learn(new_dentry, mnt, new_mode);
50625 + return new_mode;
50626 + }
50627 +
50628 + return retval;
50629 +}
50630 +
50631 +__u32
50632 +gr_check_link(const struct dentry * new_dentry,
50633 + const struct dentry * parent_dentry,
50634 + const struct vfsmount * parent_mnt,
50635 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50636 +{
50637 + struct acl_object_label *obj;
50638 + __u32 oldmode, newmode;
50639 + __u32 needmode;
50640 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50641 + GR_DELETE | GR_INHERIT;
50642 +
50643 + if (unlikely(!(gr_status & GR_READY)))
50644 + return (GR_CREATE | GR_LINK);
50645 +
50646 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50647 + oldmode = obj->mode;
50648 +
50649 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50650 + newmode = obj->mode;
50651 +
50652 + needmode = newmode & checkmodes;
50653 +
50654 + // old name for hardlink must have at least the permissions of the new name
50655 + if ((oldmode & needmode) != needmode)
50656 + goto bad;
50657 +
50658 + // if old name had restrictions/auditing, make sure the new name does as well
50659 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50660 +
50661 + // don't allow hardlinking of suid/sgid files without permission
50662 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50663 + needmode |= GR_SETID;
50664 +
50665 + if ((newmode & needmode) != needmode)
50666 + goto bad;
50667 +
50668 + // enforce minimum permissions
50669 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50670 + return newmode;
50671 +bad:
50672 + needmode = oldmode;
50673 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50674 + needmode |= GR_SETID;
50675 +
50676 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50677 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50678 + return (GR_CREATE | GR_LINK);
50679 + } else if (newmode & GR_SUPPRESS)
50680 + return GR_SUPPRESS;
50681 + else
50682 + return 0;
50683 +}
50684 +
50685 +int
50686 +gr_check_hidden_task(const struct task_struct *task)
50687 +{
50688 + if (unlikely(!(gr_status & GR_READY)))
50689 + return 0;
50690 +
50691 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50692 + return 1;
50693 +
50694 + return 0;
50695 +}
50696 +
50697 +int
50698 +gr_check_protected_task(const struct task_struct *task)
50699 +{
50700 + if (unlikely(!(gr_status & GR_READY) || !task))
50701 + return 0;
50702 +
50703 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50704 + task->acl != current->acl)
50705 + return 1;
50706 +
50707 + return 0;
50708 +}
50709 +
50710 +int
50711 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50712 +{
50713 + struct task_struct *p;
50714 + int ret = 0;
50715 +
50716 + if (unlikely(!(gr_status & GR_READY) || !pid))
50717 + return ret;
50718 +
50719 + read_lock(&tasklist_lock);
50720 + do_each_pid_task(pid, type, p) {
50721 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50722 + p->acl != current->acl) {
50723 + ret = 1;
50724 + goto out;
50725 + }
50726 + } while_each_pid_task(pid, type, p);
50727 +out:
50728 + read_unlock(&tasklist_lock);
50729 +
50730 + return ret;
50731 +}
50732 +
50733 +void
50734 +gr_copy_label(struct task_struct *tsk)
50735 +{
50736 + /* plain copying of fields is already done by dup_task_struct */
50737 + tsk->signal->used_accept = 0;
50738 + tsk->acl_sp_role = 0;
50739 + //tsk->acl_role_id = current->acl_role_id;
50740 + //tsk->acl = current->acl;
50741 + //tsk->role = current->role;
50742 + tsk->signal->curr_ip = current->signal->curr_ip;
50743 + tsk->signal->saved_ip = current->signal->saved_ip;
50744 + if (current->exec_file)
50745 + get_file(current->exec_file);
50746 + //tsk->exec_file = current->exec_file;
50747 + //tsk->is_writable = current->is_writable;
50748 + if (unlikely(current->signal->used_accept)) {
50749 + current->signal->curr_ip = 0;
50750 + current->signal->saved_ip = 0;
50751 + }
50752 +
50753 + return;
50754 +}
50755 +
50756 +static void
50757 +gr_set_proc_res(struct task_struct *task)
50758 +{
50759 + struct acl_subject_label *proc;
50760 + unsigned short i;
50761 +
50762 + proc = task->acl;
50763 +
50764 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50765 + return;
50766 +
50767 + for (i = 0; i < RLIM_NLIMITS; i++) {
50768 + if (!(proc->resmask & (1 << i)))
50769 + continue;
50770 +
50771 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50772 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50773 + }
50774 +
50775 + return;
50776 +}
50777 +
50778 +extern int __gr_process_user_ban(struct user_struct *user);
50779 +
50780 +int
50781 +gr_check_user_change(int real, int effective, int fs)
50782 +{
50783 + unsigned int i;
50784 + __u16 num;
50785 + uid_t *uidlist;
50786 + int curuid;
50787 + int realok = 0;
50788 + int effectiveok = 0;
50789 + int fsok = 0;
50790 +
50791 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50792 + struct user_struct *user;
50793 +
50794 + if (real == -1)
50795 + goto skipit;
50796 +
50797 + user = find_user(real);
50798 + if (user == NULL)
50799 + goto skipit;
50800 +
50801 + if (__gr_process_user_ban(user)) {
50802 + /* for find_user */
50803 + free_uid(user);
50804 + return 1;
50805 + }
50806 +
50807 + /* for find_user */
50808 + free_uid(user);
50809 +
50810 +skipit:
50811 +#endif
50812 +
50813 + if (unlikely(!(gr_status & GR_READY)))
50814 + return 0;
50815 +
50816 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50817 + gr_log_learn_id_change('u', real, effective, fs);
50818 +
50819 + num = current->acl->user_trans_num;
50820 + uidlist = current->acl->user_transitions;
50821 +
50822 + if (uidlist == NULL)
50823 + return 0;
50824 +
50825 + if (real == -1)
50826 + realok = 1;
50827 + if (effective == -1)
50828 + effectiveok = 1;
50829 + if (fs == -1)
50830 + fsok = 1;
50831 +
50832 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50833 + for (i = 0; i < num; i++) {
50834 + curuid = (int)uidlist[i];
50835 + if (real == curuid)
50836 + realok = 1;
50837 + if (effective == curuid)
50838 + effectiveok = 1;
50839 + if (fs == curuid)
50840 + fsok = 1;
50841 + }
50842 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50843 + for (i = 0; i < num; i++) {
50844 + curuid = (int)uidlist[i];
50845 + if (real == curuid)
50846 + break;
50847 + if (effective == curuid)
50848 + break;
50849 + if (fs == curuid)
50850 + break;
50851 + }
50852 + /* not in deny list */
50853 + if (i == num) {
50854 + realok = 1;
50855 + effectiveok = 1;
50856 + fsok = 1;
50857 + }
50858 + }
50859 +
50860 + if (realok && effectiveok && fsok)
50861 + return 0;
50862 + else {
50863 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50864 + return 1;
50865 + }
50866 +}
50867 +
50868 +int
50869 +gr_check_group_change(int real, int effective, int fs)
50870 +{
50871 + unsigned int i;
50872 + __u16 num;
50873 + gid_t *gidlist;
50874 + int curgid;
50875 + int realok = 0;
50876 + int effectiveok = 0;
50877 + int fsok = 0;
50878 +
50879 + if (unlikely(!(gr_status & GR_READY)))
50880 + return 0;
50881 +
50882 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50883 + gr_log_learn_id_change('g', real, effective, fs);
50884 +
50885 + num = current->acl->group_trans_num;
50886 + gidlist = current->acl->group_transitions;
50887 +
50888 + if (gidlist == NULL)
50889 + return 0;
50890 +
50891 + if (real == -1)
50892 + realok = 1;
50893 + if (effective == -1)
50894 + effectiveok = 1;
50895 + if (fs == -1)
50896 + fsok = 1;
50897 +
50898 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50899 + for (i = 0; i < num; i++) {
50900 + curgid = (int)gidlist[i];
50901 + if (real == curgid)
50902 + realok = 1;
50903 + if (effective == curgid)
50904 + effectiveok = 1;
50905 + if (fs == curgid)
50906 + fsok = 1;
50907 + }
50908 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50909 + for (i = 0; i < num; i++) {
50910 + curgid = (int)gidlist[i];
50911 + if (real == curgid)
50912 + break;
50913 + if (effective == curgid)
50914 + break;
50915 + if (fs == curgid)
50916 + break;
50917 + }
50918 + /* not in deny list */
50919 + if (i == num) {
50920 + realok = 1;
50921 + effectiveok = 1;
50922 + fsok = 1;
50923 + }
50924 + }
50925 +
50926 + if (realok && effectiveok && fsok)
50927 + return 0;
50928 + else {
50929 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50930 + return 1;
50931 + }
50932 +}
50933 +
50934 +extern int gr_acl_is_capable(const int cap);
50935 +
50936 +void
50937 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50938 +{
50939 + struct acl_role_label *role = task->role;
50940 + struct acl_subject_label *subj = NULL;
50941 + struct acl_object_label *obj;
50942 + struct file *filp;
50943 +
50944 + if (unlikely(!(gr_status & GR_READY)))
50945 + return;
50946 +
50947 + filp = task->exec_file;
50948 +
50949 + /* kernel process, we'll give them the kernel role */
50950 + if (unlikely(!filp)) {
50951 + task->role = kernel_role;
50952 + task->acl = kernel_role->root_label;
50953 + return;
50954 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50955 + role = lookup_acl_role_label(task, uid, gid);
50956 +
50957 + /* don't change the role if we're not a privileged process */
50958 + if (role && task->role != role &&
50959 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
50960 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
50961 + return;
50962 +
50963 + /* perform subject lookup in possibly new role
50964 + we can use this result below in the case where role == task->role
50965 + */
50966 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50967 +
50968 + /* if we changed uid/gid, but result in the same role
50969 + and are using inheritance, don't lose the inherited subject
50970 + if current subject is other than what normal lookup
50971 + would result in, we arrived via inheritance, don't
50972 + lose subject
50973 + */
50974 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50975 + (subj == task->acl)))
50976 + task->acl = subj;
50977 +
50978 + task->role = role;
50979 +
50980 + task->is_writable = 0;
50981 +
50982 + /* ignore additional mmap checks for processes that are writable
50983 + by the default ACL */
50984 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50985 + if (unlikely(obj->mode & GR_WRITE))
50986 + task->is_writable = 1;
50987 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50988 + if (unlikely(obj->mode & GR_WRITE))
50989 + task->is_writable = 1;
50990 +
50991 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50992 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50993 +#endif
50994 +
50995 + gr_set_proc_res(task);
50996 +
50997 + return;
50998 +}
50999 +
51000 +int
51001 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
51002 + const int unsafe_flags)
51003 +{
51004 + struct task_struct *task = current;
51005 + struct acl_subject_label *newacl;
51006 + struct acl_object_label *obj;
51007 + __u32 retmode;
51008 +
51009 + if (unlikely(!(gr_status & GR_READY)))
51010 + return 0;
51011 +
51012 + newacl = chk_subj_label(dentry, mnt, task->role);
51013 +
51014 + task_lock(task);
51015 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
51016 + !(task->role->roletype & GR_ROLE_GOD) &&
51017 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
51018 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51019 + task_unlock(task);
51020 + if (unsafe_flags & LSM_UNSAFE_SHARE)
51021 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
51022 + else
51023 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
51024 + return -EACCES;
51025 + }
51026 + task_unlock(task);
51027 +
51028 + obj = chk_obj_label(dentry, mnt, task->acl);
51029 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
51030 +
51031 + if (!(task->acl->mode & GR_INHERITLEARN) &&
51032 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
51033 + if (obj->nested)
51034 + task->acl = obj->nested;
51035 + else
51036 + task->acl = newacl;
51037 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
51038 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
51039 +
51040 + task->is_writable = 0;
51041 +
51042 + /* ignore additional mmap checks for processes that are writable
51043 + by the default ACL */
51044 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
51045 + if (unlikely(obj->mode & GR_WRITE))
51046 + task->is_writable = 1;
51047 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
51048 + if (unlikely(obj->mode & GR_WRITE))
51049 + task->is_writable = 1;
51050 +
51051 + gr_set_proc_res(task);
51052 +
51053 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51054 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51055 +#endif
51056 + return 0;
51057 +}
51058 +
51059 +/* always called with valid inodev ptr */
51060 +static void
51061 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
51062 +{
51063 + struct acl_object_label *matchpo;
51064 + struct acl_subject_label *matchps;
51065 + struct acl_subject_label *subj;
51066 + struct acl_role_label *role;
51067 + unsigned int x;
51068 +
51069 + FOR_EACH_ROLE_START(role)
51070 + FOR_EACH_SUBJECT_START(role, subj, x)
51071 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
51072 + matchpo->mode |= GR_DELETED;
51073 + FOR_EACH_SUBJECT_END(subj,x)
51074 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51075 + if (subj->inode == ino && subj->device == dev)
51076 + subj->mode |= GR_DELETED;
51077 + FOR_EACH_NESTED_SUBJECT_END(subj)
51078 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
51079 + matchps->mode |= GR_DELETED;
51080 + FOR_EACH_ROLE_END(role)
51081 +
51082 + inodev->nentry->deleted = 1;
51083 +
51084 + return;
51085 +}
51086 +
51087 +void
51088 +gr_handle_delete(const ino_t ino, const dev_t dev)
51089 +{
51090 + struct inodev_entry *inodev;
51091 +
51092 + if (unlikely(!(gr_status & GR_READY)))
51093 + return;
51094 +
51095 + write_lock(&gr_inode_lock);
51096 + inodev = lookup_inodev_entry(ino, dev);
51097 + if (inodev != NULL)
51098 + do_handle_delete(inodev, ino, dev);
51099 + write_unlock(&gr_inode_lock);
51100 +
51101 + return;
51102 +}
51103 +
51104 +static void
51105 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
51106 + const ino_t newinode, const dev_t newdevice,
51107 + struct acl_subject_label *subj)
51108 +{
51109 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
51110 + struct acl_object_label *match;
51111 +
51112 + match = subj->obj_hash[index];
51113 +
51114 + while (match && (match->inode != oldinode ||
51115 + match->device != olddevice ||
51116 + !(match->mode & GR_DELETED)))
51117 + match = match->next;
51118 +
51119 + if (match && (match->inode == oldinode)
51120 + && (match->device == olddevice)
51121 + && (match->mode & GR_DELETED)) {
51122 + if (match->prev == NULL) {
51123 + subj->obj_hash[index] = match->next;
51124 + if (match->next != NULL)
51125 + match->next->prev = NULL;
51126 + } else {
51127 + match->prev->next = match->next;
51128 + if (match->next != NULL)
51129 + match->next->prev = match->prev;
51130 + }
51131 + match->prev = NULL;
51132 + match->next = NULL;
51133 + match->inode = newinode;
51134 + match->device = newdevice;
51135 + match->mode &= ~GR_DELETED;
51136 +
51137 + insert_acl_obj_label(match, subj);
51138 + }
51139 +
51140 + return;
51141 +}
51142 +
51143 +static void
51144 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
51145 + const ino_t newinode, const dev_t newdevice,
51146 + struct acl_role_label *role)
51147 +{
51148 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
51149 + struct acl_subject_label *match;
51150 +
51151 + match = role->subj_hash[index];
51152 +
51153 + while (match && (match->inode != oldinode ||
51154 + match->device != olddevice ||
51155 + !(match->mode & GR_DELETED)))
51156 + match = match->next;
51157 +
51158 + if (match && (match->inode == oldinode)
51159 + && (match->device == olddevice)
51160 + && (match->mode & GR_DELETED)) {
51161 + if (match->prev == NULL) {
51162 + role->subj_hash[index] = match->next;
51163 + if (match->next != NULL)
51164 + match->next->prev = NULL;
51165 + } else {
51166 + match->prev->next = match->next;
51167 + if (match->next != NULL)
51168 + match->next->prev = match->prev;
51169 + }
51170 + match->prev = NULL;
51171 + match->next = NULL;
51172 + match->inode = newinode;
51173 + match->device = newdevice;
51174 + match->mode &= ~GR_DELETED;
51175 +
51176 + insert_acl_subj_label(match, role);
51177 + }
51178 +
51179 + return;
51180 +}
51181 +
51182 +static void
51183 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
51184 + const ino_t newinode, const dev_t newdevice)
51185 +{
51186 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51187 + struct inodev_entry *match;
51188 +
51189 + match = inodev_set.i_hash[index];
51190 +
51191 + while (match && (match->nentry->inode != oldinode ||
51192 + match->nentry->device != olddevice || !match->nentry->deleted))
51193 + match = match->next;
51194 +
51195 + if (match && (match->nentry->inode == oldinode)
51196 + && (match->nentry->device == olddevice) &&
51197 + match->nentry->deleted) {
51198 + if (match->prev == NULL) {
51199 + inodev_set.i_hash[index] = match->next;
51200 + if (match->next != NULL)
51201 + match->next->prev = NULL;
51202 + } else {
51203 + match->prev->next = match->next;
51204 + if (match->next != NULL)
51205 + match->next->prev = match->prev;
51206 + }
51207 + match->prev = NULL;
51208 + match->next = NULL;
51209 + match->nentry->inode = newinode;
51210 + match->nentry->device = newdevice;
51211 + match->nentry->deleted = 0;
51212 +
51213 + insert_inodev_entry(match);
51214 + }
51215 +
51216 + return;
51217 +}
51218 +
51219 +static void
51220 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51221 +{
51222 + struct acl_subject_label *subj;
51223 + struct acl_role_label *role;
51224 + unsigned int x;
51225 +
51226 + FOR_EACH_ROLE_START(role)
51227 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51228 +
51229 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51230 + if ((subj->inode == ino) && (subj->device == dev)) {
51231 + subj->inode = ino;
51232 + subj->device = dev;
51233 + }
51234 + FOR_EACH_NESTED_SUBJECT_END(subj)
51235 + FOR_EACH_SUBJECT_START(role, subj, x)
51236 + update_acl_obj_label(matchn->inode, matchn->device,
51237 + ino, dev, subj);
51238 + FOR_EACH_SUBJECT_END(subj,x)
51239 + FOR_EACH_ROLE_END(role)
51240 +
51241 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51242 +
51243 + return;
51244 +}
51245 +
51246 +static void
51247 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51248 + const struct vfsmount *mnt)
51249 +{
51250 + ino_t ino = dentry->d_inode->i_ino;
51251 + dev_t dev = __get_dev(dentry);
51252 +
51253 + __do_handle_create(matchn, ino, dev);
51254 +
51255 + return;
51256 +}
51257 +
51258 +void
51259 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51260 +{
51261 + struct name_entry *matchn;
51262 +
51263 + if (unlikely(!(gr_status & GR_READY)))
51264 + return;
51265 +
51266 + preempt_disable();
51267 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51268 +
51269 + if (unlikely((unsigned long)matchn)) {
51270 + write_lock(&gr_inode_lock);
51271 + do_handle_create(matchn, dentry, mnt);
51272 + write_unlock(&gr_inode_lock);
51273 + }
51274 + preempt_enable();
51275 +
51276 + return;
51277 +}
51278 +
51279 +void
51280 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51281 +{
51282 + struct name_entry *matchn;
51283 +
51284 + if (unlikely(!(gr_status & GR_READY)))
51285 + return;
51286 +
51287 + preempt_disable();
51288 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51289 +
51290 + if (unlikely((unsigned long)matchn)) {
51291 + write_lock(&gr_inode_lock);
51292 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51293 + write_unlock(&gr_inode_lock);
51294 + }
51295 + preempt_enable();
51296 +
51297 + return;
51298 +}
51299 +
51300 +void
51301 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51302 + struct dentry *old_dentry,
51303 + struct dentry *new_dentry,
51304 + struct vfsmount *mnt, const __u8 replace)
51305 +{
51306 + struct name_entry *matchn;
51307 + struct inodev_entry *inodev;
51308 + struct inode *inode = new_dentry->d_inode;
51309 + ino_t old_ino = old_dentry->d_inode->i_ino;
51310 + dev_t old_dev = __get_dev(old_dentry);
51311 +
51312 + /* vfs_rename swaps the name and parent link for old_dentry and
51313 + new_dentry
51314 + at this point, old_dentry has the new name, parent link, and inode
51315 + for the renamed file
51316 + if a file is being replaced by a rename, new_dentry has the inode
51317 + and name for the replaced file
51318 + */
51319 +
51320 + if (unlikely(!(gr_status & GR_READY)))
51321 + return;
51322 +
51323 + preempt_disable();
51324 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51325 +
51326 + /* we wouldn't have to check d_inode if it weren't for
51327 + NFS silly-renaming
51328 + */
51329 +
51330 + write_lock(&gr_inode_lock);
51331 + if (unlikely(replace && inode)) {
51332 + ino_t new_ino = inode->i_ino;
51333 + dev_t new_dev = __get_dev(new_dentry);
51334 +
51335 + inodev = lookup_inodev_entry(new_ino, new_dev);
51336 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51337 + do_handle_delete(inodev, new_ino, new_dev);
51338 + }
51339 +
51340 + inodev = lookup_inodev_entry(old_ino, old_dev);
51341 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51342 + do_handle_delete(inodev, old_ino, old_dev);
51343 +
51344 + if (unlikely((unsigned long)matchn))
51345 + do_handle_create(matchn, old_dentry, mnt);
51346 +
51347 + write_unlock(&gr_inode_lock);
51348 + preempt_enable();
51349 +
51350 + return;
51351 +}
51352 +
51353 +static int
51354 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51355 + unsigned char **sum)
51356 +{
51357 + struct acl_role_label *r;
51358 + struct role_allowed_ip *ipp;
51359 + struct role_transition *trans;
51360 + unsigned int i;
51361 + int found = 0;
51362 + u32 curr_ip = current->signal->curr_ip;
51363 +
51364 + current->signal->saved_ip = curr_ip;
51365 +
51366 + /* check transition table */
51367 +
51368 + for (trans = current->role->transitions; trans; trans = trans->next) {
51369 + if (!strcmp(rolename, trans->rolename)) {
51370 + found = 1;
51371 + break;
51372 + }
51373 + }
51374 +
51375 + if (!found)
51376 + return 0;
51377 +
51378 + /* handle special roles that do not require authentication
51379 + and check ip */
51380 +
51381 + FOR_EACH_ROLE_START(r)
51382 + if (!strcmp(rolename, r->rolename) &&
51383 + (r->roletype & GR_ROLE_SPECIAL)) {
51384 + found = 0;
51385 + if (r->allowed_ips != NULL) {
51386 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51387 + if ((ntohl(curr_ip) & ipp->netmask) ==
51388 + (ntohl(ipp->addr) & ipp->netmask))
51389 + found = 1;
51390 + }
51391 + } else
51392 + found = 2;
51393 + if (!found)
51394 + return 0;
51395 +
51396 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51397 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51398 + *salt = NULL;
51399 + *sum = NULL;
51400 + return 1;
51401 + }
51402 + }
51403 + FOR_EACH_ROLE_END(r)
51404 +
51405 + for (i = 0; i < num_sprole_pws; i++) {
51406 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51407 + *salt = acl_special_roles[i]->salt;
51408 + *sum = acl_special_roles[i]->sum;
51409 + return 1;
51410 + }
51411 + }
51412 +
51413 + return 0;
51414 +}
51415 +
51416 +static void
51417 +assign_special_role(char *rolename)
51418 +{
51419 + struct acl_object_label *obj;
51420 + struct acl_role_label *r;
51421 + struct acl_role_label *assigned = NULL;
51422 + struct task_struct *tsk;
51423 + struct file *filp;
51424 +
51425 + FOR_EACH_ROLE_START(r)
51426 + if (!strcmp(rolename, r->rolename) &&
51427 + (r->roletype & GR_ROLE_SPECIAL)) {
51428 + assigned = r;
51429 + break;
51430 + }
51431 + FOR_EACH_ROLE_END(r)
51432 +
51433 + if (!assigned)
51434 + return;
51435 +
51436 + read_lock(&tasklist_lock);
51437 + read_lock(&grsec_exec_file_lock);
51438 +
51439 + tsk = current->real_parent;
51440 + if (tsk == NULL)
51441 + goto out_unlock;
51442 +
51443 + filp = tsk->exec_file;
51444 + if (filp == NULL)
51445 + goto out_unlock;
51446 +
51447 + tsk->is_writable = 0;
51448 +
51449 + tsk->acl_sp_role = 1;
51450 + tsk->acl_role_id = ++acl_sp_role_value;
51451 + tsk->role = assigned;
51452 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51453 +
51454 + /* ignore additional mmap checks for processes that are writable
51455 + by the default ACL */
51456 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51457 + if (unlikely(obj->mode & GR_WRITE))
51458 + tsk->is_writable = 1;
51459 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51460 + if (unlikely(obj->mode & GR_WRITE))
51461 + tsk->is_writable = 1;
51462 +
51463 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51464 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51465 +#endif
51466 +
51467 +out_unlock:
51468 + read_unlock(&grsec_exec_file_lock);
51469 + read_unlock(&tasklist_lock);
51470 + return;
51471 +}
51472 +
51473 +int gr_check_secure_terminal(struct task_struct *task)
51474 +{
51475 + struct task_struct *p, *p2, *p3;
51476 + struct files_struct *files;
51477 + struct fdtable *fdt;
51478 + struct file *our_file = NULL, *file;
51479 + int i;
51480 +
51481 + if (task->signal->tty == NULL)
51482 + return 1;
51483 +
51484 + files = get_files_struct(task);
51485 + if (files != NULL) {
51486 + rcu_read_lock();
51487 + fdt = files_fdtable(files);
51488 + for (i=0; i < fdt->max_fds; i++) {
51489 + file = fcheck_files(files, i);
51490 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51491 + get_file(file);
51492 + our_file = file;
51493 + }
51494 + }
51495 + rcu_read_unlock();
51496 + put_files_struct(files);
51497 + }
51498 +
51499 + if (our_file == NULL)
51500 + return 1;
51501 +
51502 + read_lock(&tasklist_lock);
51503 + do_each_thread(p2, p) {
51504 + files = get_files_struct(p);
51505 + if (files == NULL ||
51506 + (p->signal && p->signal->tty == task->signal->tty)) {
51507 + if (files != NULL)
51508 + put_files_struct(files);
51509 + continue;
51510 + }
51511 + rcu_read_lock();
51512 + fdt = files_fdtable(files);
51513 + for (i=0; i < fdt->max_fds; i++) {
51514 + file = fcheck_files(files, i);
51515 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51516 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51517 + p3 = task;
51518 + while (p3->pid > 0) {
51519 + if (p3 == p)
51520 + break;
51521 + p3 = p3->real_parent;
51522 + }
51523 + if (p3 == p)
51524 + break;
51525 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51526 + gr_handle_alertkill(p);
51527 + rcu_read_unlock();
51528 + put_files_struct(files);
51529 + read_unlock(&tasklist_lock);
51530 + fput(our_file);
51531 + return 0;
51532 + }
51533 + }
51534 + rcu_read_unlock();
51535 + put_files_struct(files);
51536 + } while_each_thread(p2, p);
51537 + read_unlock(&tasklist_lock);
51538 +
51539 + fput(our_file);
51540 + return 1;
51541 +}
51542 +
51543 +ssize_t
51544 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51545 +{
51546 + struct gr_arg_wrapper uwrap;
51547 + unsigned char *sprole_salt = NULL;
51548 + unsigned char *sprole_sum = NULL;
51549 + int error = sizeof (struct gr_arg_wrapper);
51550 + int error2 = 0;
51551 +
51552 + mutex_lock(&gr_dev_mutex);
51553 +
51554 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51555 + error = -EPERM;
51556 + goto out;
51557 + }
51558 +
51559 + if (count != sizeof (struct gr_arg_wrapper)) {
51560 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51561 + error = -EINVAL;
51562 + goto out;
51563 + }
51564 +
51565 +
51566 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51567 + gr_auth_expires = 0;
51568 + gr_auth_attempts = 0;
51569 + }
51570 +
51571 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51572 + error = -EFAULT;
51573 + goto out;
51574 + }
51575 +
51576 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51577 + error = -EINVAL;
51578 + goto out;
51579 + }
51580 +
51581 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51582 + error = -EFAULT;
51583 + goto out;
51584 + }
51585 +
51586 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51587 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51588 + time_after(gr_auth_expires, get_seconds())) {
51589 + error = -EBUSY;
51590 + goto out;
51591 + }
51592 +
51593 + /* if non-root trying to do anything other than use a special role,
51594 + do not attempt authentication, do not count towards authentication
51595 + locking
51596 + */
51597 +
51598 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51599 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51600 + current_uid()) {
51601 + error = -EPERM;
51602 + goto out;
51603 + }
51604 +
51605 + /* ensure pw and special role name are null terminated */
51606 +
51607 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51608 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51609 +
51610 + /* Okay.
51611 + * We have our enough of the argument structure..(we have yet
51612 + * to copy_from_user the tables themselves) . Copy the tables
51613 + * only if we need them, i.e. for loading operations. */
51614 +
51615 + switch (gr_usermode->mode) {
51616 + case GR_STATUS:
51617 + if (gr_status & GR_READY) {
51618 + error = 1;
51619 + if (!gr_check_secure_terminal(current))
51620 + error = 3;
51621 + } else
51622 + error = 2;
51623 + goto out;
51624 + case GR_SHUTDOWN:
51625 + if ((gr_status & GR_READY)
51626 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51627 + pax_open_kernel();
51628 + gr_status &= ~GR_READY;
51629 + pax_close_kernel();
51630 +
51631 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51632 + free_variables();
51633 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51634 + memset(gr_system_salt, 0, GR_SALT_LEN);
51635 + memset(gr_system_sum, 0, GR_SHA_LEN);
51636 + } else if (gr_status & GR_READY) {
51637 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51638 + error = -EPERM;
51639 + } else {
51640 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51641 + error = -EAGAIN;
51642 + }
51643 + break;
51644 + case GR_ENABLE:
51645 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51646 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51647 + else {
51648 + if (gr_status & GR_READY)
51649 + error = -EAGAIN;
51650 + else
51651 + error = error2;
51652 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51653 + }
51654 + break;
51655 + case GR_RELOAD:
51656 + if (!(gr_status & GR_READY)) {
51657 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51658 + error = -EAGAIN;
51659 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51660 + preempt_disable();
51661 +
51662 + pax_open_kernel();
51663 + gr_status &= ~GR_READY;
51664 + pax_close_kernel();
51665 +
51666 + free_variables();
51667 + if (!(error2 = gracl_init(gr_usermode))) {
51668 + preempt_enable();
51669 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51670 + } else {
51671 + preempt_enable();
51672 + error = error2;
51673 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51674 + }
51675 + } else {
51676 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51677 + error = -EPERM;
51678 + }
51679 + break;
51680 + case GR_SEGVMOD:
51681 + if (unlikely(!(gr_status & GR_READY))) {
51682 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51683 + error = -EAGAIN;
51684 + break;
51685 + }
51686 +
51687 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51688 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51689 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51690 + struct acl_subject_label *segvacl;
51691 + segvacl =
51692 + lookup_acl_subj_label(gr_usermode->segv_inode,
51693 + gr_usermode->segv_device,
51694 + current->role);
51695 + if (segvacl) {
51696 + segvacl->crashes = 0;
51697 + segvacl->expires = 0;
51698 + }
51699 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51700 + gr_remove_uid(gr_usermode->segv_uid);
51701 + }
51702 + } else {
51703 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51704 + error = -EPERM;
51705 + }
51706 + break;
51707 + case GR_SPROLE:
51708 + case GR_SPROLEPAM:
51709 + if (unlikely(!(gr_status & GR_READY))) {
51710 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51711 + error = -EAGAIN;
51712 + break;
51713 + }
51714 +
51715 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51716 + current->role->expires = 0;
51717 + current->role->auth_attempts = 0;
51718 + }
51719 +
51720 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51721 + time_after(current->role->expires, get_seconds())) {
51722 + error = -EBUSY;
51723 + goto out;
51724 + }
51725 +
51726 + if (lookup_special_role_auth
51727 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51728 + && ((!sprole_salt && !sprole_sum)
51729 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51730 + char *p = "";
51731 + assign_special_role(gr_usermode->sp_role);
51732 + read_lock(&tasklist_lock);
51733 + if (current->real_parent)
51734 + p = current->real_parent->role->rolename;
51735 + read_unlock(&tasklist_lock);
51736 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51737 + p, acl_sp_role_value);
51738 + } else {
51739 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51740 + error = -EPERM;
51741 + if(!(current->role->auth_attempts++))
51742 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51743 +
51744 + goto out;
51745 + }
51746 + break;
51747 + case GR_UNSPROLE:
51748 + if (unlikely(!(gr_status & GR_READY))) {
51749 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51750 + error = -EAGAIN;
51751 + break;
51752 + }
51753 +
51754 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51755 + char *p = "";
51756 + int i = 0;
51757 +
51758 + read_lock(&tasklist_lock);
51759 + if (current->real_parent) {
51760 + p = current->real_parent->role->rolename;
51761 + i = current->real_parent->acl_role_id;
51762 + }
51763 + read_unlock(&tasklist_lock);
51764 +
51765 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51766 + gr_set_acls(1);
51767 + } else {
51768 + error = -EPERM;
51769 + goto out;
51770 + }
51771 + break;
51772 + default:
51773 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51774 + error = -EINVAL;
51775 + break;
51776 + }
51777 +
51778 + if (error != -EPERM)
51779 + goto out;
51780 +
51781 + if(!(gr_auth_attempts++))
51782 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51783 +
51784 + out:
51785 + mutex_unlock(&gr_dev_mutex);
51786 + return error;
51787 +}
51788 +
51789 +/* must be called with
51790 + rcu_read_lock();
51791 + read_lock(&tasklist_lock);
51792 + read_lock(&grsec_exec_file_lock);
51793 +*/
51794 +int gr_apply_subject_to_task(struct task_struct *task)
51795 +{
51796 + struct acl_object_label *obj;
51797 + char *tmpname;
51798 + struct acl_subject_label *tmpsubj;
51799 + struct file *filp;
51800 + struct name_entry *nmatch;
51801 +
51802 + filp = task->exec_file;
51803 + if (filp == NULL)
51804 + return 0;
51805 +
51806 + /* the following is to apply the correct subject
51807 + on binaries running when the RBAC system
51808 + is enabled, when the binaries have been
51809 + replaced or deleted since their execution
51810 + -----
51811 + when the RBAC system starts, the inode/dev
51812 + from exec_file will be one the RBAC system
51813 + is unaware of. It only knows the inode/dev
51814 + of the present file on disk, or the absence
51815 + of it.
51816 + */
51817 + preempt_disable();
51818 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51819 +
51820 + nmatch = lookup_name_entry(tmpname);
51821 + preempt_enable();
51822 + tmpsubj = NULL;
51823 + if (nmatch) {
51824 + if (nmatch->deleted)
51825 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51826 + else
51827 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51828 + if (tmpsubj != NULL)
51829 + task->acl = tmpsubj;
51830 + }
51831 + if (tmpsubj == NULL)
51832 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51833 + task->role);
51834 + if (task->acl) {
51835 + task->is_writable = 0;
51836 + /* ignore additional mmap checks for processes that are writable
51837 + by the default ACL */
51838 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51839 + if (unlikely(obj->mode & GR_WRITE))
51840 + task->is_writable = 1;
51841 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51842 + if (unlikely(obj->mode & GR_WRITE))
51843 + task->is_writable = 1;
51844 +
51845 + gr_set_proc_res(task);
51846 +
51847 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51848 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51849 +#endif
51850 + } else {
51851 + return 1;
51852 + }
51853 +
51854 + return 0;
51855 +}
51856 +
51857 +int
51858 +gr_set_acls(const int type)
51859 +{
51860 + struct task_struct *task, *task2;
51861 + struct acl_role_label *role = current->role;
51862 + __u16 acl_role_id = current->acl_role_id;
51863 + const struct cred *cred;
51864 + int ret;
51865 +
51866 + rcu_read_lock();
51867 + read_lock(&tasklist_lock);
51868 + read_lock(&grsec_exec_file_lock);
51869 + do_each_thread(task2, task) {
51870 + /* check to see if we're called from the exit handler,
51871 + if so, only replace ACLs that have inherited the admin
51872 + ACL */
51873 +
51874 + if (type && (task->role != role ||
51875 + task->acl_role_id != acl_role_id))
51876 + continue;
51877 +
51878 + task->acl_role_id = 0;
51879 + task->acl_sp_role = 0;
51880 +
51881 + if (task->exec_file) {
51882 + cred = __task_cred(task);
51883 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51884 + ret = gr_apply_subject_to_task(task);
51885 + if (ret) {
51886 + read_unlock(&grsec_exec_file_lock);
51887 + read_unlock(&tasklist_lock);
51888 + rcu_read_unlock();
51889 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51890 + return ret;
51891 + }
51892 + } else {
51893 + // it's a kernel process
51894 + task->role = kernel_role;
51895 + task->acl = kernel_role->root_label;
51896 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51897 + task->acl->mode &= ~GR_PROCFIND;
51898 +#endif
51899 + }
51900 + } while_each_thread(task2, task);
51901 + read_unlock(&grsec_exec_file_lock);
51902 + read_unlock(&tasklist_lock);
51903 + rcu_read_unlock();
51904 +
51905 + return 0;
51906 +}
51907 +
51908 +void
51909 +gr_learn_resource(const struct task_struct *task,
51910 + const int res, const unsigned long wanted, const int gt)
51911 +{
51912 + struct acl_subject_label *acl;
51913 + const struct cred *cred;
51914 +
51915 + if (unlikely((gr_status & GR_READY) &&
51916 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51917 + goto skip_reslog;
51918 +
51919 +#ifdef CONFIG_GRKERNSEC_RESLOG
51920 + gr_log_resource(task, res, wanted, gt);
51921 +#endif
51922 + skip_reslog:
51923 +
51924 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51925 + return;
51926 +
51927 + acl = task->acl;
51928 +
51929 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51930 + !(acl->resmask & (1 << (unsigned short) res))))
51931 + return;
51932 +
51933 + if (wanted >= acl->res[res].rlim_cur) {
51934 + unsigned long res_add;
51935 +
51936 + res_add = wanted;
51937 + switch (res) {
51938 + case RLIMIT_CPU:
51939 + res_add += GR_RLIM_CPU_BUMP;
51940 + break;
51941 + case RLIMIT_FSIZE:
51942 + res_add += GR_RLIM_FSIZE_BUMP;
51943 + break;
51944 + case RLIMIT_DATA:
51945 + res_add += GR_RLIM_DATA_BUMP;
51946 + break;
51947 + case RLIMIT_STACK:
51948 + res_add += GR_RLIM_STACK_BUMP;
51949 + break;
51950 + case RLIMIT_CORE:
51951 + res_add += GR_RLIM_CORE_BUMP;
51952 + break;
51953 + case RLIMIT_RSS:
51954 + res_add += GR_RLIM_RSS_BUMP;
51955 + break;
51956 + case RLIMIT_NPROC:
51957 + res_add += GR_RLIM_NPROC_BUMP;
51958 + break;
51959 + case RLIMIT_NOFILE:
51960 + res_add += GR_RLIM_NOFILE_BUMP;
51961 + break;
51962 + case RLIMIT_MEMLOCK:
51963 + res_add += GR_RLIM_MEMLOCK_BUMP;
51964 + break;
51965 + case RLIMIT_AS:
51966 + res_add += GR_RLIM_AS_BUMP;
51967 + break;
51968 + case RLIMIT_LOCKS:
51969 + res_add += GR_RLIM_LOCKS_BUMP;
51970 + break;
51971 + case RLIMIT_SIGPENDING:
51972 + res_add += GR_RLIM_SIGPENDING_BUMP;
51973 + break;
51974 + case RLIMIT_MSGQUEUE:
51975 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51976 + break;
51977 + case RLIMIT_NICE:
51978 + res_add += GR_RLIM_NICE_BUMP;
51979 + break;
51980 + case RLIMIT_RTPRIO:
51981 + res_add += GR_RLIM_RTPRIO_BUMP;
51982 + break;
51983 + case RLIMIT_RTTIME:
51984 + res_add += GR_RLIM_RTTIME_BUMP;
51985 + break;
51986 + }
51987 +
51988 + acl->res[res].rlim_cur = res_add;
51989 +
51990 + if (wanted > acl->res[res].rlim_max)
51991 + acl->res[res].rlim_max = res_add;
51992 +
51993 + /* only log the subject filename, since resource logging is supported for
51994 + single-subject learning only */
51995 + rcu_read_lock();
51996 + cred = __task_cred(task);
51997 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51998 + task->role->roletype, cred->uid, cred->gid, acl->filename,
51999 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
52000 + "", (unsigned long) res, &task->signal->saved_ip);
52001 + rcu_read_unlock();
52002 + }
52003 +
52004 + return;
52005 +}
52006 +
52007 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
52008 +void
52009 +pax_set_initial_flags(struct linux_binprm *bprm)
52010 +{
52011 + struct task_struct *task = current;
52012 + struct acl_subject_label *proc;
52013 + unsigned long flags;
52014 +
52015 + if (unlikely(!(gr_status & GR_READY)))
52016 + return;
52017 +
52018 + flags = pax_get_flags(task);
52019 +
52020 + proc = task->acl;
52021 +
52022 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
52023 + flags &= ~MF_PAX_PAGEEXEC;
52024 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
52025 + flags &= ~MF_PAX_SEGMEXEC;
52026 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
52027 + flags &= ~MF_PAX_RANDMMAP;
52028 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
52029 + flags &= ~MF_PAX_EMUTRAMP;
52030 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
52031 + flags &= ~MF_PAX_MPROTECT;
52032 +
52033 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
52034 + flags |= MF_PAX_PAGEEXEC;
52035 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
52036 + flags |= MF_PAX_SEGMEXEC;
52037 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
52038 + flags |= MF_PAX_RANDMMAP;
52039 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
52040 + flags |= MF_PAX_EMUTRAMP;
52041 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
52042 + flags |= MF_PAX_MPROTECT;
52043 +
52044 + pax_set_flags(task, flags);
52045 +
52046 + return;
52047 +}
52048 +#endif
52049 +
52050 +#ifdef CONFIG_SYSCTL
52051 +/* Eric Biederman likes breaking userland ABI and every inode-based security
52052 + system to save 35kb of memory */
52053 +
52054 +/* we modify the passed in filename, but adjust it back before returning */
52055 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
52056 +{
52057 + struct name_entry *nmatch;
52058 + char *p, *lastp = NULL;
52059 + struct acl_object_label *obj = NULL, *tmp;
52060 + struct acl_subject_label *tmpsubj;
52061 + char c = '\0';
52062 +
52063 + read_lock(&gr_inode_lock);
52064 +
52065 + p = name + len - 1;
52066 + do {
52067 + nmatch = lookup_name_entry(name);
52068 + if (lastp != NULL)
52069 + *lastp = c;
52070 +
52071 + if (nmatch == NULL)
52072 + goto next_component;
52073 + tmpsubj = current->acl;
52074 + do {
52075 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
52076 + if (obj != NULL) {
52077 + tmp = obj->globbed;
52078 + while (tmp) {
52079 + if (!glob_match(tmp->filename, name)) {
52080 + obj = tmp;
52081 + goto found_obj;
52082 + }
52083 + tmp = tmp->next;
52084 + }
52085 + goto found_obj;
52086 + }
52087 + } while ((tmpsubj = tmpsubj->parent_subject));
52088 +next_component:
52089 + /* end case */
52090 + if (p == name)
52091 + break;
52092 +
52093 + while (*p != '/')
52094 + p--;
52095 + if (p == name)
52096 + lastp = p + 1;
52097 + else {
52098 + lastp = p;
52099 + p--;
52100 + }
52101 + c = *lastp;
52102 + *lastp = '\0';
52103 + } while (1);
52104 +found_obj:
52105 + read_unlock(&gr_inode_lock);
52106 + /* obj returned will always be non-null */
52107 + return obj;
52108 +}
52109 +
52110 +/* returns 0 when allowing, non-zero on error
52111 + op of 0 is used for readdir, so we don't log the names of hidden files
52112 +*/
52113 +__u32
52114 +gr_handle_sysctl(const struct ctl_table *table, const int op)
52115 +{
52116 + struct ctl_table *tmp;
52117 + const char *proc_sys = "/proc/sys";
52118 + char *path;
52119 + struct acl_object_label *obj;
52120 + unsigned short len = 0, pos = 0, depth = 0, i;
52121 + __u32 err = 0;
52122 + __u32 mode = 0;
52123 +
52124 + if (unlikely(!(gr_status & GR_READY)))
52125 + return 0;
52126 +
52127 + /* for now, ignore operations on non-sysctl entries if it's not a
52128 + readdir*/
52129 + if (table->child != NULL && op != 0)
52130 + return 0;
52131 +
52132 + mode |= GR_FIND;
52133 + /* it's only a read if it's an entry, read on dirs is for readdir */
52134 + if (op & MAY_READ)
52135 + mode |= GR_READ;
52136 + if (op & MAY_WRITE)
52137 + mode |= GR_WRITE;
52138 +
52139 + preempt_disable();
52140 +
52141 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
52142 +
52143 + /* it's only a read/write if it's an actual entry, not a dir
52144 + (which are opened for readdir)
52145 + */
52146 +
52147 + /* convert the requested sysctl entry into a pathname */
52148 +
52149 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52150 + len += strlen(tmp->procname);
52151 + len++;
52152 + depth++;
52153 + }
52154 +
52155 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
52156 + /* deny */
52157 + goto out;
52158 + }
52159 +
52160 + memset(path, 0, PAGE_SIZE);
52161 +
52162 + memcpy(path, proc_sys, strlen(proc_sys));
52163 +
52164 + pos += strlen(proc_sys);
52165 +
52166 + for (; depth > 0; depth--) {
52167 + path[pos] = '/';
52168 + pos++;
52169 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52170 + if (depth == i) {
52171 + memcpy(path + pos, tmp->procname,
52172 + strlen(tmp->procname));
52173 + pos += strlen(tmp->procname);
52174 + }
52175 + i++;
52176 + }
52177 + }
52178 +
52179 + obj = gr_lookup_by_name(path, pos);
52180 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
52181 +
52182 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
52183 + ((err & mode) != mode))) {
52184 + __u32 new_mode = mode;
52185 +
52186 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52187 +
52188 + err = 0;
52189 + gr_log_learn_sysctl(path, new_mode);
52190 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52191 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52192 + err = -ENOENT;
52193 + } else if (!(err & GR_FIND)) {
52194 + err = -ENOENT;
52195 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52196 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52197 + path, (mode & GR_READ) ? " reading" : "",
52198 + (mode & GR_WRITE) ? " writing" : "");
52199 + err = -EACCES;
52200 + } else if ((err & mode) != mode) {
52201 + err = -EACCES;
52202 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52203 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52204 + path, (mode & GR_READ) ? " reading" : "",
52205 + (mode & GR_WRITE) ? " writing" : "");
52206 + err = 0;
52207 + } else
52208 + err = 0;
52209 +
52210 + out:
52211 + preempt_enable();
52212 +
52213 + return err;
52214 +}
52215 +#endif
52216 +
52217 +int
52218 +gr_handle_proc_ptrace(struct task_struct *task)
52219 +{
52220 + struct file *filp;
52221 + struct task_struct *tmp = task;
52222 + struct task_struct *curtemp = current;
52223 + __u32 retmode;
52224 +
52225 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52226 + if (unlikely(!(gr_status & GR_READY)))
52227 + return 0;
52228 +#endif
52229 +
52230 + read_lock(&tasklist_lock);
52231 + read_lock(&grsec_exec_file_lock);
52232 + filp = task->exec_file;
52233 +
52234 + while (tmp->pid > 0) {
52235 + if (tmp == curtemp)
52236 + break;
52237 + tmp = tmp->real_parent;
52238 + }
52239 +
52240 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52241 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52242 + read_unlock(&grsec_exec_file_lock);
52243 + read_unlock(&tasklist_lock);
52244 + return 1;
52245 + }
52246 +
52247 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52248 + if (!(gr_status & GR_READY)) {
52249 + read_unlock(&grsec_exec_file_lock);
52250 + read_unlock(&tasklist_lock);
52251 + return 0;
52252 + }
52253 +#endif
52254 +
52255 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52256 + read_unlock(&grsec_exec_file_lock);
52257 + read_unlock(&tasklist_lock);
52258 +
52259 + if (retmode & GR_NOPTRACE)
52260 + return 1;
52261 +
52262 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52263 + && (current->acl != task->acl || (current->acl != current->role->root_label
52264 + && current->pid != task->pid)))
52265 + return 1;
52266 +
52267 + return 0;
52268 +}
52269 +
52270 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52271 +{
52272 + if (unlikely(!(gr_status & GR_READY)))
52273 + return;
52274 +
52275 + if (!(current->role->roletype & GR_ROLE_GOD))
52276 + return;
52277 +
52278 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52279 + p->role->rolename, gr_task_roletype_to_char(p),
52280 + p->acl->filename);
52281 +}
52282 +
52283 +int
52284 +gr_handle_ptrace(struct task_struct *task, const long request)
52285 +{
52286 + struct task_struct *tmp = task;
52287 + struct task_struct *curtemp = current;
52288 + __u32 retmode;
52289 +
52290 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52291 + if (unlikely(!(gr_status & GR_READY)))
52292 + return 0;
52293 +#endif
52294 +
52295 + read_lock(&tasklist_lock);
52296 + while (tmp->pid > 0) {
52297 + if (tmp == curtemp)
52298 + break;
52299 + tmp = tmp->real_parent;
52300 + }
52301 +
52302 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52303 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52304 + read_unlock(&tasklist_lock);
52305 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52306 + return 1;
52307 + }
52308 + read_unlock(&tasklist_lock);
52309 +
52310 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52311 + if (!(gr_status & GR_READY))
52312 + return 0;
52313 +#endif
52314 +
52315 + read_lock(&grsec_exec_file_lock);
52316 + if (unlikely(!task->exec_file)) {
52317 + read_unlock(&grsec_exec_file_lock);
52318 + return 0;
52319 + }
52320 +
52321 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52322 + read_unlock(&grsec_exec_file_lock);
52323 +
52324 + if (retmode & GR_NOPTRACE) {
52325 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52326 + return 1;
52327 + }
52328 +
52329 + if (retmode & GR_PTRACERD) {
52330 + switch (request) {
52331 + case PTRACE_SEIZE:
52332 + case PTRACE_POKETEXT:
52333 + case PTRACE_POKEDATA:
52334 + case PTRACE_POKEUSR:
52335 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52336 + case PTRACE_SETREGS:
52337 + case PTRACE_SETFPREGS:
52338 +#endif
52339 +#ifdef CONFIG_X86
52340 + case PTRACE_SETFPXREGS:
52341 +#endif
52342 +#ifdef CONFIG_ALTIVEC
52343 + case PTRACE_SETVRREGS:
52344 +#endif
52345 + return 1;
52346 + default:
52347 + return 0;
52348 + }
52349 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
52350 + !(current->role->roletype & GR_ROLE_GOD) &&
52351 + (current->acl != task->acl)) {
52352 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52353 + return 1;
52354 + }
52355 +
52356 + return 0;
52357 +}
52358 +
52359 +static int is_writable_mmap(const struct file *filp)
52360 +{
52361 + struct task_struct *task = current;
52362 + struct acl_object_label *obj, *obj2;
52363 +
52364 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52365 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52366 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52367 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52368 + task->role->root_label);
52369 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52370 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52371 + return 1;
52372 + }
52373 + }
52374 + return 0;
52375 +}
52376 +
52377 +int
52378 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52379 +{
52380 + __u32 mode;
52381 +
52382 + if (unlikely(!file || !(prot & PROT_EXEC)))
52383 + return 1;
52384 +
52385 + if (is_writable_mmap(file))
52386 + return 0;
52387 +
52388 + mode =
52389 + gr_search_file(file->f_path.dentry,
52390 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52391 + file->f_path.mnt);
52392 +
52393 + if (!gr_tpe_allow(file))
52394 + return 0;
52395 +
52396 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52397 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52398 + return 0;
52399 + } else if (unlikely(!(mode & GR_EXEC))) {
52400 + return 0;
52401 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52402 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52403 + return 1;
52404 + }
52405 +
52406 + return 1;
52407 +}
52408 +
52409 +int
52410 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52411 +{
52412 + __u32 mode;
52413 +
52414 + if (unlikely(!file || !(prot & PROT_EXEC)))
52415 + return 1;
52416 +
52417 + if (is_writable_mmap(file))
52418 + return 0;
52419 +
52420 + mode =
52421 + gr_search_file(file->f_path.dentry,
52422 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52423 + file->f_path.mnt);
52424 +
52425 + if (!gr_tpe_allow(file))
52426 + return 0;
52427 +
52428 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52429 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52430 + return 0;
52431 + } else if (unlikely(!(mode & GR_EXEC))) {
52432 + return 0;
52433 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52434 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52435 + return 1;
52436 + }
52437 +
52438 + return 1;
52439 +}
52440 +
52441 +void
52442 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52443 +{
52444 + unsigned long runtime;
52445 + unsigned long cputime;
52446 + unsigned int wday, cday;
52447 + __u8 whr, chr;
52448 + __u8 wmin, cmin;
52449 + __u8 wsec, csec;
52450 + struct timespec timeval;
52451 +
52452 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52453 + !(task->acl->mode & GR_PROCACCT)))
52454 + return;
52455 +
52456 + do_posix_clock_monotonic_gettime(&timeval);
52457 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52458 + wday = runtime / (3600 * 24);
52459 + runtime -= wday * (3600 * 24);
52460 + whr = runtime / 3600;
52461 + runtime -= whr * 3600;
52462 + wmin = runtime / 60;
52463 + runtime -= wmin * 60;
52464 + wsec = runtime;
52465 +
52466 + cputime = (task->utime + task->stime) / HZ;
52467 + cday = cputime / (3600 * 24);
52468 + cputime -= cday * (3600 * 24);
52469 + chr = cputime / 3600;
52470 + cputime -= chr * 3600;
52471 + cmin = cputime / 60;
52472 + cputime -= cmin * 60;
52473 + csec = cputime;
52474 +
52475 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52476 +
52477 + return;
52478 +}
52479 +
52480 +void gr_set_kernel_label(struct task_struct *task)
52481 +{
52482 + if (gr_status & GR_READY) {
52483 + task->role = kernel_role;
52484 + task->acl = kernel_role->root_label;
52485 + }
52486 + return;
52487 +}
52488 +
52489 +#ifdef CONFIG_TASKSTATS
52490 +int gr_is_taskstats_denied(int pid)
52491 +{
52492 + struct task_struct *task;
52493 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52494 + const struct cred *cred;
52495 +#endif
52496 + int ret = 0;
52497 +
52498 + /* restrict taskstats viewing to un-chrooted root users
52499 + who have the 'view' subject flag if the RBAC system is enabled
52500 + */
52501 +
52502 + rcu_read_lock();
52503 + read_lock(&tasklist_lock);
52504 + task = find_task_by_vpid(pid);
52505 + if (task) {
52506 +#ifdef CONFIG_GRKERNSEC_CHROOT
52507 + if (proc_is_chrooted(task))
52508 + ret = -EACCES;
52509 +#endif
52510 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52511 + cred = __task_cred(task);
52512 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52513 + if (cred->uid != 0)
52514 + ret = -EACCES;
52515 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52516 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52517 + ret = -EACCES;
52518 +#endif
52519 +#endif
52520 + if (gr_status & GR_READY) {
52521 + if (!(task->acl->mode & GR_VIEW))
52522 + ret = -EACCES;
52523 + }
52524 + } else
52525 + ret = -ENOENT;
52526 +
52527 + read_unlock(&tasklist_lock);
52528 + rcu_read_unlock();
52529 +
52530 + return ret;
52531 +}
52532 +#endif
52533 +
52534 +/* AUXV entries are filled via a descendant of search_binary_handler
52535 + after we've already applied the subject for the target
52536 +*/
52537 +int gr_acl_enable_at_secure(void)
52538 +{
52539 + if (unlikely(!(gr_status & GR_READY)))
52540 + return 0;
52541 +
52542 + if (current->acl->mode & GR_ATSECURE)
52543 + return 1;
52544 +
52545 + return 0;
52546 +}
52547 +
52548 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52549 +{
52550 + struct task_struct *task = current;
52551 + struct dentry *dentry = file->f_path.dentry;
52552 + struct vfsmount *mnt = file->f_path.mnt;
52553 + struct acl_object_label *obj, *tmp;
52554 + struct acl_subject_label *subj;
52555 + unsigned int bufsize;
52556 + int is_not_root;
52557 + char *path;
52558 + dev_t dev = __get_dev(dentry);
52559 +
52560 + if (unlikely(!(gr_status & GR_READY)))
52561 + return 1;
52562 +
52563 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52564 + return 1;
52565 +
52566 + /* ignore Eric Biederman */
52567 + if (IS_PRIVATE(dentry->d_inode))
52568 + return 1;
52569 +
52570 + subj = task->acl;
52571 + do {
52572 + obj = lookup_acl_obj_label(ino, dev, subj);
52573 + if (obj != NULL)
52574 + return (obj->mode & GR_FIND) ? 1 : 0;
52575 + } while ((subj = subj->parent_subject));
52576 +
52577 + /* this is purely an optimization since we're looking for an object
52578 + for the directory we're doing a readdir on
52579 + if it's possible for any globbed object to match the entry we're
52580 + filling into the directory, then the object we find here will be
52581 + an anchor point with attached globbed objects
52582 + */
52583 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52584 + if (obj->globbed == NULL)
52585 + return (obj->mode & GR_FIND) ? 1 : 0;
52586 +
52587 + is_not_root = ((obj->filename[0] == '/') &&
52588 + (obj->filename[1] == '\0')) ? 0 : 1;
52589 + bufsize = PAGE_SIZE - namelen - is_not_root;
52590 +
52591 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52592 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52593 + return 1;
52594 +
52595 + preempt_disable();
52596 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52597 + bufsize);
52598 +
52599 + bufsize = strlen(path);
52600 +
52601 + /* if base is "/", don't append an additional slash */
52602 + if (is_not_root)
52603 + *(path + bufsize) = '/';
52604 + memcpy(path + bufsize + is_not_root, name, namelen);
52605 + *(path + bufsize + namelen + is_not_root) = '\0';
52606 +
52607 + tmp = obj->globbed;
52608 + while (tmp) {
52609 + if (!glob_match(tmp->filename, path)) {
52610 + preempt_enable();
52611 + return (tmp->mode & GR_FIND) ? 1 : 0;
52612 + }
52613 + tmp = tmp->next;
52614 + }
52615 + preempt_enable();
52616 + return (obj->mode & GR_FIND) ? 1 : 0;
52617 +}
52618 +
52619 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52620 +EXPORT_SYMBOL(gr_acl_is_enabled);
52621 +#endif
52622 +EXPORT_SYMBOL(gr_learn_resource);
52623 +EXPORT_SYMBOL(gr_set_kernel_label);
52624 +#ifdef CONFIG_SECURITY
52625 +EXPORT_SYMBOL(gr_check_user_change);
52626 +EXPORT_SYMBOL(gr_check_group_change);
52627 +#endif
52628 +
52629 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52630 new file mode 100644
52631 index 0000000..34fefda
52632 --- /dev/null
52633 +++ b/grsecurity/gracl_alloc.c
52634 @@ -0,0 +1,105 @@
52635 +#include <linux/kernel.h>
52636 +#include <linux/mm.h>
52637 +#include <linux/slab.h>
52638 +#include <linux/vmalloc.h>
52639 +#include <linux/gracl.h>
52640 +#include <linux/grsecurity.h>
52641 +
52642 +static unsigned long alloc_stack_next = 1;
52643 +static unsigned long alloc_stack_size = 1;
52644 +static void **alloc_stack;
52645 +
52646 +static __inline__ int
52647 +alloc_pop(void)
52648 +{
52649 + if (alloc_stack_next == 1)
52650 + return 0;
52651 +
52652 + kfree(alloc_stack[alloc_stack_next - 2]);
52653 +
52654 + alloc_stack_next--;
52655 +
52656 + return 1;
52657 +}
52658 +
52659 +static __inline__ int
52660 +alloc_push(void *buf)
52661 +{
52662 + if (alloc_stack_next >= alloc_stack_size)
52663 + return 1;
52664 +
52665 + alloc_stack[alloc_stack_next - 1] = buf;
52666 +
52667 + alloc_stack_next++;
52668 +
52669 + return 0;
52670 +}
52671 +
52672 +void *
52673 +acl_alloc(unsigned long len)
52674 +{
52675 + void *ret = NULL;
52676 +
52677 + if (!len || len > PAGE_SIZE)
52678 + goto out;
52679 +
52680 + ret = kmalloc(len, GFP_KERNEL);
52681 +
52682 + if (ret) {
52683 + if (alloc_push(ret)) {
52684 + kfree(ret);
52685 + ret = NULL;
52686 + }
52687 + }
52688 +
52689 +out:
52690 + return ret;
52691 +}
52692 +
52693 +void *
52694 +acl_alloc_num(unsigned long num, unsigned long len)
52695 +{
52696 + if (!len || (num > (PAGE_SIZE / len)))
52697 + return NULL;
52698 +
52699 + return acl_alloc(num * len);
52700 +}
52701 +
52702 +void
52703 +acl_free_all(void)
52704 +{
52705 + if (gr_acl_is_enabled() || !alloc_stack)
52706 + return;
52707 +
52708 + while (alloc_pop()) ;
52709 +
52710 + if (alloc_stack) {
52711 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52712 + kfree(alloc_stack);
52713 + else
52714 + vfree(alloc_stack);
52715 + }
52716 +
52717 + alloc_stack = NULL;
52718 + alloc_stack_size = 1;
52719 + alloc_stack_next = 1;
52720 +
52721 + return;
52722 +}
52723 +
52724 +int
52725 +acl_alloc_stack_init(unsigned long size)
52726 +{
52727 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52728 + alloc_stack =
52729 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52730 + else
52731 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52732 +
52733 + alloc_stack_size = size;
52734 +
52735 + if (!alloc_stack)
52736 + return 0;
52737 + else
52738 + return 1;
52739 +}
52740 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52741 new file mode 100644
52742 index 0000000..955ddfb
52743 --- /dev/null
52744 +++ b/grsecurity/gracl_cap.c
52745 @@ -0,0 +1,101 @@
52746 +#include <linux/kernel.h>
52747 +#include <linux/module.h>
52748 +#include <linux/sched.h>
52749 +#include <linux/gracl.h>
52750 +#include <linux/grsecurity.h>
52751 +#include <linux/grinternal.h>
52752 +
52753 +extern const char *captab_log[];
52754 +extern int captab_log_entries;
52755 +
52756 +int
52757 +gr_acl_is_capable(const int cap)
52758 +{
52759 + struct task_struct *task = current;
52760 + const struct cred *cred = current_cred();
52761 + struct acl_subject_label *curracl;
52762 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52763 + kernel_cap_t cap_audit = __cap_empty_set;
52764 +
52765 + if (!gr_acl_is_enabled())
52766 + return 1;
52767 +
52768 + curracl = task->acl;
52769 +
52770 + cap_drop = curracl->cap_lower;
52771 + cap_mask = curracl->cap_mask;
52772 + cap_audit = curracl->cap_invert_audit;
52773 +
52774 + while ((curracl = curracl->parent_subject)) {
52775 + /* if the cap isn't specified in the current computed mask but is specified in the
52776 + current level subject, and is lowered in the current level subject, then add
52777 + it to the set of dropped capabilities
52778 + otherwise, add the current level subject's mask to the current computed mask
52779 + */
52780 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52781 + cap_raise(cap_mask, cap);
52782 + if (cap_raised(curracl->cap_lower, cap))
52783 + cap_raise(cap_drop, cap);
52784 + if (cap_raised(curracl->cap_invert_audit, cap))
52785 + cap_raise(cap_audit, cap);
52786 + }
52787 + }
52788 +
52789 + if (!cap_raised(cap_drop, cap)) {
52790 + if (cap_raised(cap_audit, cap))
52791 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52792 + return 1;
52793 + }
52794 +
52795 + curracl = task->acl;
52796 +
52797 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52798 + && cap_raised(cred->cap_effective, cap)) {
52799 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52800 + task->role->roletype, cred->uid,
52801 + cred->gid, task->exec_file ?
52802 + gr_to_filename(task->exec_file->f_path.dentry,
52803 + task->exec_file->f_path.mnt) : curracl->filename,
52804 + curracl->filename, 0UL,
52805 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52806 + return 1;
52807 + }
52808 +
52809 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52810 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52811 + return 0;
52812 +}
52813 +
52814 +int
52815 +gr_acl_is_capable_nolog(const int cap)
52816 +{
52817 + struct acl_subject_label *curracl;
52818 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52819 +
52820 + if (!gr_acl_is_enabled())
52821 + return 1;
52822 +
52823 + curracl = current->acl;
52824 +
52825 + cap_drop = curracl->cap_lower;
52826 + cap_mask = curracl->cap_mask;
52827 +
52828 + while ((curracl = curracl->parent_subject)) {
52829 + /* if the cap isn't specified in the current computed mask but is specified in the
52830 + current level subject, and is lowered in the current level subject, then add
52831 + it to the set of dropped capabilities
52832 + otherwise, add the current level subject's mask to the current computed mask
52833 + */
52834 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52835 + cap_raise(cap_mask, cap);
52836 + if (cap_raised(curracl->cap_lower, cap))
52837 + cap_raise(cap_drop, cap);
52838 + }
52839 + }
52840 +
52841 + if (!cap_raised(cap_drop, cap))
52842 + return 1;
52843 +
52844 + return 0;
52845 +}
52846 +
52847 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52848 new file mode 100644
52849 index 0000000..88d0e87
52850 --- /dev/null
52851 +++ b/grsecurity/gracl_fs.c
52852 @@ -0,0 +1,435 @@
52853 +#include <linux/kernel.h>
52854 +#include <linux/sched.h>
52855 +#include <linux/types.h>
52856 +#include <linux/fs.h>
52857 +#include <linux/file.h>
52858 +#include <linux/stat.h>
52859 +#include <linux/grsecurity.h>
52860 +#include <linux/grinternal.h>
52861 +#include <linux/gracl.h>
52862 +
52863 +umode_t
52864 +gr_acl_umask(void)
52865 +{
52866 + if (unlikely(!gr_acl_is_enabled()))
52867 + return 0;
52868 +
52869 + return current->role->umask;
52870 +}
52871 +
52872 +__u32
52873 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52874 + const struct vfsmount * mnt)
52875 +{
52876 + __u32 mode;
52877 +
52878 + if (unlikely(!dentry->d_inode))
52879 + return GR_FIND;
52880 +
52881 + mode =
52882 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52883 +
52884 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52885 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52886 + return mode;
52887 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52888 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52889 + return 0;
52890 + } else if (unlikely(!(mode & GR_FIND)))
52891 + return 0;
52892 +
52893 + return GR_FIND;
52894 +}
52895 +
52896 +__u32
52897 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52898 + int acc_mode)
52899 +{
52900 + __u32 reqmode = GR_FIND;
52901 + __u32 mode;
52902 +
52903 + if (unlikely(!dentry->d_inode))
52904 + return reqmode;
52905 +
52906 + if (acc_mode & MAY_APPEND)
52907 + reqmode |= GR_APPEND;
52908 + else if (acc_mode & MAY_WRITE)
52909 + reqmode |= GR_WRITE;
52910 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52911 + reqmode |= GR_READ;
52912 +
52913 + mode =
52914 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52915 + mnt);
52916 +
52917 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52918 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52919 + reqmode & GR_READ ? " reading" : "",
52920 + reqmode & GR_WRITE ? " writing" : reqmode &
52921 + GR_APPEND ? " appending" : "");
52922 + return reqmode;
52923 + } else
52924 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52925 + {
52926 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52927 + reqmode & GR_READ ? " reading" : "",
52928 + reqmode & GR_WRITE ? " writing" : reqmode &
52929 + GR_APPEND ? " appending" : "");
52930 + return 0;
52931 + } else if (unlikely((mode & reqmode) != reqmode))
52932 + return 0;
52933 +
52934 + return reqmode;
52935 +}
52936 +
52937 +__u32
52938 +gr_acl_handle_creat(const struct dentry * dentry,
52939 + const struct dentry * p_dentry,
52940 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52941 + const int imode)
52942 +{
52943 + __u32 reqmode = GR_WRITE | GR_CREATE;
52944 + __u32 mode;
52945 +
52946 + if (acc_mode & MAY_APPEND)
52947 + reqmode |= GR_APPEND;
52948 + // if a directory was required or the directory already exists, then
52949 + // don't count this open as a read
52950 + if ((acc_mode & MAY_READ) &&
52951 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52952 + reqmode |= GR_READ;
52953 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52954 + reqmode |= GR_SETID;
52955 +
52956 + mode =
52957 + gr_check_create(dentry, p_dentry, p_mnt,
52958 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52959 +
52960 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52961 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52962 + reqmode & GR_READ ? " reading" : "",
52963 + reqmode & GR_WRITE ? " writing" : reqmode &
52964 + GR_APPEND ? " appending" : "");
52965 + return reqmode;
52966 + } else
52967 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52968 + {
52969 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52970 + reqmode & GR_READ ? " reading" : "",
52971 + reqmode & GR_WRITE ? " writing" : reqmode &
52972 + GR_APPEND ? " appending" : "");
52973 + return 0;
52974 + } else if (unlikely((mode & reqmode) != reqmode))
52975 + return 0;
52976 +
52977 + return reqmode;
52978 +}
52979 +
52980 +__u32
52981 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52982 + const int fmode)
52983 +{
52984 + __u32 mode, reqmode = GR_FIND;
52985 +
52986 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52987 + reqmode |= GR_EXEC;
52988 + if (fmode & S_IWOTH)
52989 + reqmode |= GR_WRITE;
52990 + if (fmode & S_IROTH)
52991 + reqmode |= GR_READ;
52992 +
52993 + mode =
52994 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52995 + mnt);
52996 +
52997 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52998 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52999 + reqmode & GR_READ ? " reading" : "",
53000 + reqmode & GR_WRITE ? " writing" : "",
53001 + reqmode & GR_EXEC ? " executing" : "");
53002 + return reqmode;
53003 + } else
53004 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
53005 + {
53006 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
53007 + reqmode & GR_READ ? " reading" : "",
53008 + reqmode & GR_WRITE ? " writing" : "",
53009 + reqmode & GR_EXEC ? " executing" : "");
53010 + return 0;
53011 + } else if (unlikely((mode & reqmode) != reqmode))
53012 + return 0;
53013 +
53014 + return reqmode;
53015 +}
53016 +
53017 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
53018 +{
53019 + __u32 mode;
53020 +
53021 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
53022 +
53023 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53024 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
53025 + return mode;
53026 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53027 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
53028 + return 0;
53029 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53030 + return 0;
53031 +
53032 + return (reqmode);
53033 +}
53034 +
53035 +__u32
53036 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53037 +{
53038 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
53039 +}
53040 +
53041 +__u32
53042 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
53043 +{
53044 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
53045 +}
53046 +
53047 +__u32
53048 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
53049 +{
53050 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
53051 +}
53052 +
53053 +__u32
53054 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
53055 +{
53056 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
53057 +}
53058 +
53059 +__u32
53060 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
53061 + umode_t *modeptr)
53062 +{
53063 + umode_t mode;
53064 +
53065 + *modeptr &= ~gr_acl_umask();
53066 + mode = *modeptr;
53067 +
53068 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
53069 + return 1;
53070 +
53071 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
53072 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
53073 + GR_CHMOD_ACL_MSG);
53074 + } else {
53075 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
53076 + }
53077 +}
53078 +
53079 +__u32
53080 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
53081 +{
53082 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
53083 +}
53084 +
53085 +__u32
53086 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
53087 +{
53088 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
53089 +}
53090 +
53091 +__u32
53092 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
53093 +{
53094 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
53095 +}
53096 +
53097 +__u32
53098 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
53099 +{
53100 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
53101 + GR_UNIXCONNECT_ACL_MSG);
53102 +}
53103 +
53104 +/* hardlinks require at minimum create and link permission,
53105 + any additional privilege required is based on the
53106 + privilege of the file being linked to
53107 +*/
53108 +__u32
53109 +gr_acl_handle_link(const struct dentry * new_dentry,
53110 + const struct dentry * parent_dentry,
53111 + const struct vfsmount * parent_mnt,
53112 + const struct dentry * old_dentry,
53113 + const struct vfsmount * old_mnt, const char *to)
53114 +{
53115 + __u32 mode;
53116 + __u32 needmode = GR_CREATE | GR_LINK;
53117 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
53118 +
53119 + mode =
53120 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
53121 + old_mnt);
53122 +
53123 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
53124 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53125 + return mode;
53126 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53127 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53128 + return 0;
53129 + } else if (unlikely((mode & needmode) != needmode))
53130 + return 0;
53131 +
53132 + return 1;
53133 +}
53134 +
53135 +__u32
53136 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53137 + const struct dentry * parent_dentry,
53138 + const struct vfsmount * parent_mnt, const char *from)
53139 +{
53140 + __u32 needmode = GR_WRITE | GR_CREATE;
53141 + __u32 mode;
53142 +
53143 + mode =
53144 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
53145 + GR_CREATE | GR_AUDIT_CREATE |
53146 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
53147 +
53148 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
53149 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53150 + return mode;
53151 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53152 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53153 + return 0;
53154 + } else if (unlikely((mode & needmode) != needmode))
53155 + return 0;
53156 +
53157 + return (GR_WRITE | GR_CREATE);
53158 +}
53159 +
53160 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
53161 +{
53162 + __u32 mode;
53163 +
53164 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
53165 +
53166 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53167 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
53168 + return mode;
53169 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53170 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
53171 + return 0;
53172 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53173 + return 0;
53174 +
53175 + return (reqmode);
53176 +}
53177 +
53178 +__u32
53179 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53180 + const struct dentry * parent_dentry,
53181 + const struct vfsmount * parent_mnt,
53182 + const int mode)
53183 +{
53184 + __u32 reqmode = GR_WRITE | GR_CREATE;
53185 + if (unlikely(mode & (S_ISUID | S_ISGID)))
53186 + reqmode |= GR_SETID;
53187 +
53188 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53189 + reqmode, GR_MKNOD_ACL_MSG);
53190 +}
53191 +
53192 +__u32
53193 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
53194 + const struct dentry *parent_dentry,
53195 + const struct vfsmount *parent_mnt)
53196 +{
53197 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53198 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53199 +}
53200 +
53201 +#define RENAME_CHECK_SUCCESS(old, new) \
53202 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53203 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53204 +
53205 +int
53206 +gr_acl_handle_rename(struct dentry *new_dentry,
53207 + struct dentry *parent_dentry,
53208 + const struct vfsmount *parent_mnt,
53209 + struct dentry *old_dentry,
53210 + struct inode *old_parent_inode,
53211 + struct vfsmount *old_mnt, const char *newname)
53212 +{
53213 + __u32 comp1, comp2;
53214 + int error = 0;
53215 +
53216 + if (unlikely(!gr_acl_is_enabled()))
53217 + return 0;
53218 +
53219 + if (!new_dentry->d_inode) {
53220 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53221 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53222 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53223 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53224 + GR_DELETE | GR_AUDIT_DELETE |
53225 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53226 + GR_SUPPRESS, old_mnt);
53227 + } else {
53228 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53229 + GR_CREATE | GR_DELETE |
53230 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53231 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53232 + GR_SUPPRESS, parent_mnt);
53233 + comp2 =
53234 + gr_search_file(old_dentry,
53235 + GR_READ | GR_WRITE | GR_AUDIT_READ |
53236 + GR_DELETE | GR_AUDIT_DELETE |
53237 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53238 + }
53239 +
53240 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53241 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53242 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53243 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53244 + && !(comp2 & GR_SUPPRESS)) {
53245 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53246 + error = -EACCES;
53247 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53248 + error = -EACCES;
53249 +
53250 + return error;
53251 +}
53252 +
53253 +void
53254 +gr_acl_handle_exit(void)
53255 +{
53256 + u16 id;
53257 + char *rolename;
53258 + struct file *exec_file;
53259 +
53260 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53261 + !(current->role->roletype & GR_ROLE_PERSIST))) {
53262 + id = current->acl_role_id;
53263 + rolename = current->role->rolename;
53264 + gr_set_acls(1);
53265 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53266 + }
53267 +
53268 + write_lock(&grsec_exec_file_lock);
53269 + exec_file = current->exec_file;
53270 + current->exec_file = NULL;
53271 + write_unlock(&grsec_exec_file_lock);
53272 +
53273 + if (exec_file)
53274 + fput(exec_file);
53275 +}
53276 +
53277 +int
53278 +gr_acl_handle_procpidmem(const struct task_struct *task)
53279 +{
53280 + if (unlikely(!gr_acl_is_enabled()))
53281 + return 0;
53282 +
53283 + if (task != current && task->acl->mode & GR_PROTPROCFD)
53284 + return -EACCES;
53285 +
53286 + return 0;
53287 +}
53288 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53289 new file mode 100644
53290 index 0000000..17050ca
53291 --- /dev/null
53292 +++ b/grsecurity/gracl_ip.c
53293 @@ -0,0 +1,381 @@
53294 +#include <linux/kernel.h>
53295 +#include <asm/uaccess.h>
53296 +#include <asm/errno.h>
53297 +#include <net/sock.h>
53298 +#include <linux/file.h>
53299 +#include <linux/fs.h>
53300 +#include <linux/net.h>
53301 +#include <linux/in.h>
53302 +#include <linux/skbuff.h>
53303 +#include <linux/ip.h>
53304 +#include <linux/udp.h>
53305 +#include <linux/types.h>
53306 +#include <linux/sched.h>
53307 +#include <linux/netdevice.h>
53308 +#include <linux/inetdevice.h>
53309 +#include <linux/gracl.h>
53310 +#include <linux/grsecurity.h>
53311 +#include <linux/grinternal.h>
53312 +
53313 +#define GR_BIND 0x01
53314 +#define GR_CONNECT 0x02
53315 +#define GR_INVERT 0x04
53316 +#define GR_BINDOVERRIDE 0x08
53317 +#define GR_CONNECTOVERRIDE 0x10
53318 +#define GR_SOCK_FAMILY 0x20
53319 +
53320 +static const char * gr_protocols[IPPROTO_MAX] = {
53321 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53322 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53323 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53324 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53325 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53326 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53327 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53328 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53329 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53330 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53331 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53332 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53333 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53334 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53335 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53336 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53337 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53338 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53339 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53340 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53341 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53342 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53343 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53344 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53345 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53346 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53347 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53348 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53349 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53350 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53351 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53352 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53353 + };
53354 +
53355 +static const char * gr_socktypes[SOCK_MAX] = {
53356 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53357 + "unknown:7", "unknown:8", "unknown:9", "packet"
53358 + };
53359 +
53360 +static const char * gr_sockfamilies[AF_MAX+1] = {
53361 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53362 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53363 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53364 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53365 + };
53366 +
53367 +const char *
53368 +gr_proto_to_name(unsigned char proto)
53369 +{
53370 + return gr_protocols[proto];
53371 +}
53372 +
53373 +const char *
53374 +gr_socktype_to_name(unsigned char type)
53375 +{
53376 + return gr_socktypes[type];
53377 +}
53378 +
53379 +const char *
53380 +gr_sockfamily_to_name(unsigned char family)
53381 +{
53382 + return gr_sockfamilies[family];
53383 +}
53384 +
53385 +int
53386 +gr_search_socket(const int domain, const int type, const int protocol)
53387 +{
53388 + struct acl_subject_label *curr;
53389 + const struct cred *cred = current_cred();
53390 +
53391 + if (unlikely(!gr_acl_is_enabled()))
53392 + goto exit;
53393 +
53394 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53395 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53396 + goto exit; // let the kernel handle it
53397 +
53398 + curr = current->acl;
53399 +
53400 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53401 + /* the family is allowed, if this is PF_INET allow it only if
53402 + the extra sock type/protocol checks pass */
53403 + if (domain == PF_INET)
53404 + goto inet_check;
53405 + goto exit;
53406 + } else {
53407 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53408 + __u32 fakeip = 0;
53409 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53410 + current->role->roletype, cred->uid,
53411 + cred->gid, current->exec_file ?
53412 + gr_to_filename(current->exec_file->f_path.dentry,
53413 + current->exec_file->f_path.mnt) :
53414 + curr->filename, curr->filename,
53415 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53416 + &current->signal->saved_ip);
53417 + goto exit;
53418 + }
53419 + goto exit_fail;
53420 + }
53421 +
53422 +inet_check:
53423 + /* the rest of this checking is for IPv4 only */
53424 + if (!curr->ips)
53425 + goto exit;
53426 +
53427 + if ((curr->ip_type & (1 << type)) &&
53428 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53429 + goto exit;
53430 +
53431 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53432 + /* we don't place acls on raw sockets , and sometimes
53433 + dgram/ip sockets are opened for ioctl and not
53434 + bind/connect, so we'll fake a bind learn log */
53435 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53436 + __u32 fakeip = 0;
53437 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53438 + current->role->roletype, cred->uid,
53439 + cred->gid, current->exec_file ?
53440 + gr_to_filename(current->exec_file->f_path.dentry,
53441 + current->exec_file->f_path.mnt) :
53442 + curr->filename, curr->filename,
53443 + &fakeip, 0, type,
53444 + protocol, GR_CONNECT, &current->signal->saved_ip);
53445 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53446 + __u32 fakeip = 0;
53447 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53448 + current->role->roletype, cred->uid,
53449 + cred->gid, current->exec_file ?
53450 + gr_to_filename(current->exec_file->f_path.dentry,
53451 + current->exec_file->f_path.mnt) :
53452 + curr->filename, curr->filename,
53453 + &fakeip, 0, type,
53454 + protocol, GR_BIND, &current->signal->saved_ip);
53455 + }
53456 + /* we'll log when they use connect or bind */
53457 + goto exit;
53458 + }
53459 +
53460 +exit_fail:
53461 + if (domain == PF_INET)
53462 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53463 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53464 + else
53465 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53466 + gr_socktype_to_name(type), protocol);
53467 +
53468 + return 0;
53469 +exit:
53470 + return 1;
53471 +}
53472 +
53473 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53474 +{
53475 + if ((ip->mode & mode) &&
53476 + (ip_port >= ip->low) &&
53477 + (ip_port <= ip->high) &&
53478 + ((ntohl(ip_addr) & our_netmask) ==
53479 + (ntohl(our_addr) & our_netmask))
53480 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53481 + && (ip->type & (1 << type))) {
53482 + if (ip->mode & GR_INVERT)
53483 + return 2; // specifically denied
53484 + else
53485 + return 1; // allowed
53486 + }
53487 +
53488 + return 0; // not specifically allowed, may continue parsing
53489 +}
53490 +
53491 +static int
53492 +gr_search_connectbind(const int full_mode, struct sock *sk,
53493 + struct sockaddr_in *addr, const int type)
53494 +{
53495 + char iface[IFNAMSIZ] = {0};
53496 + struct acl_subject_label *curr;
53497 + struct acl_ip_label *ip;
53498 + struct inet_sock *isk;
53499 + struct net_device *dev;
53500 + struct in_device *idev;
53501 + unsigned long i;
53502 + int ret;
53503 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53504 + __u32 ip_addr = 0;
53505 + __u32 our_addr;
53506 + __u32 our_netmask;
53507 + char *p;
53508 + __u16 ip_port = 0;
53509 + const struct cred *cred = current_cred();
53510 +
53511 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53512 + return 0;
53513 +
53514 + curr = current->acl;
53515 + isk = inet_sk(sk);
53516 +
53517 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53518 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53519 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53520 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53521 + struct sockaddr_in saddr;
53522 + int err;
53523 +
53524 + saddr.sin_family = AF_INET;
53525 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53526 + saddr.sin_port = isk->inet_sport;
53527 +
53528 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53529 + if (err)
53530 + return err;
53531 +
53532 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53533 + if (err)
53534 + return err;
53535 + }
53536 +
53537 + if (!curr->ips)
53538 + return 0;
53539 +
53540 + ip_addr = addr->sin_addr.s_addr;
53541 + ip_port = ntohs(addr->sin_port);
53542 +
53543 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53544 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53545 + current->role->roletype, cred->uid,
53546 + cred->gid, current->exec_file ?
53547 + gr_to_filename(current->exec_file->f_path.dentry,
53548 + current->exec_file->f_path.mnt) :
53549 + curr->filename, curr->filename,
53550 + &ip_addr, ip_port, type,
53551 + sk->sk_protocol, mode, &current->signal->saved_ip);
53552 + return 0;
53553 + }
53554 +
53555 + for (i = 0; i < curr->ip_num; i++) {
53556 + ip = *(curr->ips + i);
53557 + if (ip->iface != NULL) {
53558 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53559 + p = strchr(iface, ':');
53560 + if (p != NULL)
53561 + *p = '\0';
53562 + dev = dev_get_by_name(sock_net(sk), iface);
53563 + if (dev == NULL)
53564 + continue;
53565 + idev = in_dev_get(dev);
53566 + if (idev == NULL) {
53567 + dev_put(dev);
53568 + continue;
53569 + }
53570 + rcu_read_lock();
53571 + for_ifa(idev) {
53572 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53573 + our_addr = ifa->ifa_address;
53574 + our_netmask = 0xffffffff;
53575 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53576 + if (ret == 1) {
53577 + rcu_read_unlock();
53578 + in_dev_put(idev);
53579 + dev_put(dev);
53580 + return 0;
53581 + } else if (ret == 2) {
53582 + rcu_read_unlock();
53583 + in_dev_put(idev);
53584 + dev_put(dev);
53585 + goto denied;
53586 + }
53587 + }
53588 + } endfor_ifa(idev);
53589 + rcu_read_unlock();
53590 + in_dev_put(idev);
53591 + dev_put(dev);
53592 + } else {
53593 + our_addr = ip->addr;
53594 + our_netmask = ip->netmask;
53595 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53596 + if (ret == 1)
53597 + return 0;
53598 + else if (ret == 2)
53599 + goto denied;
53600 + }
53601 + }
53602 +
53603 +denied:
53604 + if (mode == GR_BIND)
53605 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53606 + else if (mode == GR_CONNECT)
53607 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53608 +
53609 + return -EACCES;
53610 +}
53611 +
53612 +int
53613 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53614 +{
53615 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53616 +}
53617 +
53618 +int
53619 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53620 +{
53621 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53622 +}
53623 +
53624 +int gr_search_listen(struct socket *sock)
53625 +{
53626 + struct sock *sk = sock->sk;
53627 + struct sockaddr_in addr;
53628 +
53629 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53630 + addr.sin_port = inet_sk(sk)->inet_sport;
53631 +
53632 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53633 +}
53634 +
53635 +int gr_search_accept(struct socket *sock)
53636 +{
53637 + struct sock *sk = sock->sk;
53638 + struct sockaddr_in addr;
53639 +
53640 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53641 + addr.sin_port = inet_sk(sk)->inet_sport;
53642 +
53643 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53644 +}
53645 +
53646 +int
53647 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53648 +{
53649 + if (addr)
53650 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53651 + else {
53652 + struct sockaddr_in sin;
53653 + const struct inet_sock *inet = inet_sk(sk);
53654 +
53655 + sin.sin_addr.s_addr = inet->inet_daddr;
53656 + sin.sin_port = inet->inet_dport;
53657 +
53658 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53659 + }
53660 +}
53661 +
53662 +int
53663 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53664 +{
53665 + struct sockaddr_in sin;
53666 +
53667 + if (unlikely(skb->len < sizeof (struct udphdr)))
53668 + return 0; // skip this packet
53669 +
53670 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53671 + sin.sin_port = udp_hdr(skb)->source;
53672 +
53673 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53674 +}
53675 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53676 new file mode 100644
53677 index 0000000..25f54ef
53678 --- /dev/null
53679 +++ b/grsecurity/gracl_learn.c
53680 @@ -0,0 +1,207 @@
53681 +#include <linux/kernel.h>
53682 +#include <linux/mm.h>
53683 +#include <linux/sched.h>
53684 +#include <linux/poll.h>
53685 +#include <linux/string.h>
53686 +#include <linux/file.h>
53687 +#include <linux/types.h>
53688 +#include <linux/vmalloc.h>
53689 +#include <linux/grinternal.h>
53690 +
53691 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53692 + size_t count, loff_t *ppos);
53693 +extern int gr_acl_is_enabled(void);
53694 +
53695 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53696 +static int gr_learn_attached;
53697 +
53698 +/* use a 512k buffer */
53699 +#define LEARN_BUFFER_SIZE (512 * 1024)
53700 +
53701 +static DEFINE_SPINLOCK(gr_learn_lock);
53702 +static DEFINE_MUTEX(gr_learn_user_mutex);
53703 +
53704 +/* we need to maintain two buffers, so that the kernel context of grlearn
53705 + uses a semaphore around the userspace copying, and the other kernel contexts
53706 + use a spinlock when copying into the buffer, since they cannot sleep
53707 +*/
53708 +static char *learn_buffer;
53709 +static char *learn_buffer_user;
53710 +static int learn_buffer_len;
53711 +static int learn_buffer_user_len;
53712 +
53713 +static ssize_t
53714 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53715 +{
53716 + DECLARE_WAITQUEUE(wait, current);
53717 + ssize_t retval = 0;
53718 +
53719 + add_wait_queue(&learn_wait, &wait);
53720 + set_current_state(TASK_INTERRUPTIBLE);
53721 + do {
53722 + mutex_lock(&gr_learn_user_mutex);
53723 + spin_lock(&gr_learn_lock);
53724 + if (learn_buffer_len)
53725 + break;
53726 + spin_unlock(&gr_learn_lock);
53727 + mutex_unlock(&gr_learn_user_mutex);
53728 + if (file->f_flags & O_NONBLOCK) {
53729 + retval = -EAGAIN;
53730 + goto out;
53731 + }
53732 + if (signal_pending(current)) {
53733 + retval = -ERESTARTSYS;
53734 + goto out;
53735 + }
53736 +
53737 + schedule();
53738 + } while (1);
53739 +
53740 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53741 + learn_buffer_user_len = learn_buffer_len;
53742 + retval = learn_buffer_len;
53743 + learn_buffer_len = 0;
53744 +
53745 + spin_unlock(&gr_learn_lock);
53746 +
53747 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53748 + retval = -EFAULT;
53749 +
53750 + mutex_unlock(&gr_learn_user_mutex);
53751 +out:
53752 + set_current_state(TASK_RUNNING);
53753 + remove_wait_queue(&learn_wait, &wait);
53754 + return retval;
53755 +}
53756 +
53757 +static unsigned int
53758 +poll_learn(struct file * file, poll_table * wait)
53759 +{
53760 + poll_wait(file, &learn_wait, wait);
53761 +
53762 + if (learn_buffer_len)
53763 + return (POLLIN | POLLRDNORM);
53764 +
53765 + return 0;
53766 +}
53767 +
53768 +void
53769 +gr_clear_learn_entries(void)
53770 +{
53771 + char *tmp;
53772 +
53773 + mutex_lock(&gr_learn_user_mutex);
53774 + spin_lock(&gr_learn_lock);
53775 + tmp = learn_buffer;
53776 + learn_buffer = NULL;
53777 + spin_unlock(&gr_learn_lock);
53778 + if (tmp)
53779 + vfree(tmp);
53780 + if (learn_buffer_user != NULL) {
53781 + vfree(learn_buffer_user);
53782 + learn_buffer_user = NULL;
53783 + }
53784 + learn_buffer_len = 0;
53785 + mutex_unlock(&gr_learn_user_mutex);
53786 +
53787 + return;
53788 +}
53789 +
53790 +void
53791 +gr_add_learn_entry(const char *fmt, ...)
53792 +{
53793 + va_list args;
53794 + unsigned int len;
53795 +
53796 + if (!gr_learn_attached)
53797 + return;
53798 +
53799 + spin_lock(&gr_learn_lock);
53800 +
53801 + /* leave a gap at the end so we know when it's "full" but don't have to
53802 + compute the exact length of the string we're trying to append
53803 + */
53804 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53805 + spin_unlock(&gr_learn_lock);
53806 + wake_up_interruptible(&learn_wait);
53807 + return;
53808 + }
53809 + if (learn_buffer == NULL) {
53810 + spin_unlock(&gr_learn_lock);
53811 + return;
53812 + }
53813 +
53814 + va_start(args, fmt);
53815 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53816 + va_end(args);
53817 +
53818 + learn_buffer_len += len + 1;
53819 +
53820 + spin_unlock(&gr_learn_lock);
53821 + wake_up_interruptible(&learn_wait);
53822 +
53823 + return;
53824 +}
53825 +
53826 +static int
53827 +open_learn(struct inode *inode, struct file *file)
53828 +{
53829 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53830 + return -EBUSY;
53831 + if (file->f_mode & FMODE_READ) {
53832 + int retval = 0;
53833 + mutex_lock(&gr_learn_user_mutex);
53834 + if (learn_buffer == NULL)
53835 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53836 + if (learn_buffer_user == NULL)
53837 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53838 + if (learn_buffer == NULL) {
53839 + retval = -ENOMEM;
53840 + goto out_error;
53841 + }
53842 + if (learn_buffer_user == NULL) {
53843 + retval = -ENOMEM;
53844 + goto out_error;
53845 + }
53846 + learn_buffer_len = 0;
53847 + learn_buffer_user_len = 0;
53848 + gr_learn_attached = 1;
53849 +out_error:
53850 + mutex_unlock(&gr_learn_user_mutex);
53851 + return retval;
53852 + }
53853 + return 0;
53854 +}
53855 +
53856 +static int
53857 +close_learn(struct inode *inode, struct file *file)
53858 +{
53859 + if (file->f_mode & FMODE_READ) {
53860 + char *tmp = NULL;
53861 + mutex_lock(&gr_learn_user_mutex);
53862 + spin_lock(&gr_learn_lock);
53863 + tmp = learn_buffer;
53864 + learn_buffer = NULL;
53865 + spin_unlock(&gr_learn_lock);
53866 + if (tmp)
53867 + vfree(tmp);
53868 + if (learn_buffer_user != NULL) {
53869 + vfree(learn_buffer_user);
53870 + learn_buffer_user = NULL;
53871 + }
53872 + learn_buffer_len = 0;
53873 + learn_buffer_user_len = 0;
53874 + gr_learn_attached = 0;
53875 + mutex_unlock(&gr_learn_user_mutex);
53876 + }
53877 +
53878 + return 0;
53879 +}
53880 +
53881 +const struct file_operations grsec_fops = {
53882 + .read = read_learn,
53883 + .write = write_grsec_handler,
53884 + .open = open_learn,
53885 + .release = close_learn,
53886 + .poll = poll_learn,
53887 +};
53888 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53889 new file mode 100644
53890 index 0000000..39645c9
53891 --- /dev/null
53892 +++ b/grsecurity/gracl_res.c
53893 @@ -0,0 +1,68 @@
53894 +#include <linux/kernel.h>
53895 +#include <linux/sched.h>
53896 +#include <linux/gracl.h>
53897 +#include <linux/grinternal.h>
53898 +
53899 +static const char *restab_log[] = {
53900 + [RLIMIT_CPU] = "RLIMIT_CPU",
53901 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53902 + [RLIMIT_DATA] = "RLIMIT_DATA",
53903 + [RLIMIT_STACK] = "RLIMIT_STACK",
53904 + [RLIMIT_CORE] = "RLIMIT_CORE",
53905 + [RLIMIT_RSS] = "RLIMIT_RSS",
53906 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53907 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53908 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53909 + [RLIMIT_AS] = "RLIMIT_AS",
53910 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53911 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53912 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53913 + [RLIMIT_NICE] = "RLIMIT_NICE",
53914 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53915 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53916 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53917 +};
53918 +
53919 +void
53920 +gr_log_resource(const struct task_struct *task,
53921 + const int res, const unsigned long wanted, const int gt)
53922 +{
53923 + const struct cred *cred;
53924 + unsigned long rlim;
53925 +
53926 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53927 + return;
53928 +
53929 + // not yet supported resource
53930 + if (unlikely(!restab_log[res]))
53931 + return;
53932 +
53933 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53934 + rlim = task_rlimit_max(task, res);
53935 + else
53936 + rlim = task_rlimit(task, res);
53937 +
53938 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53939 + return;
53940 +
53941 + rcu_read_lock();
53942 + cred = __task_cred(task);
53943 +
53944 + if (res == RLIMIT_NPROC &&
53945 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53946 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53947 + goto out_rcu_unlock;
53948 + else if (res == RLIMIT_MEMLOCK &&
53949 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53950 + goto out_rcu_unlock;
53951 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53952 + goto out_rcu_unlock;
53953 + rcu_read_unlock();
53954 +
53955 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53956 +
53957 + return;
53958 +out_rcu_unlock:
53959 + rcu_read_unlock();
53960 + return;
53961 +}
53962 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53963 new file mode 100644
53964 index 0000000..5556be3
53965 --- /dev/null
53966 +++ b/grsecurity/gracl_segv.c
53967 @@ -0,0 +1,299 @@
53968 +#include <linux/kernel.h>
53969 +#include <linux/mm.h>
53970 +#include <asm/uaccess.h>
53971 +#include <asm/errno.h>
53972 +#include <asm/mman.h>
53973 +#include <net/sock.h>
53974 +#include <linux/file.h>
53975 +#include <linux/fs.h>
53976 +#include <linux/net.h>
53977 +#include <linux/in.h>
53978 +#include <linux/slab.h>
53979 +#include <linux/types.h>
53980 +#include <linux/sched.h>
53981 +#include <linux/timer.h>
53982 +#include <linux/gracl.h>
53983 +#include <linux/grsecurity.h>
53984 +#include <linux/grinternal.h>
53985 +
53986 +static struct crash_uid *uid_set;
53987 +static unsigned short uid_used;
53988 +static DEFINE_SPINLOCK(gr_uid_lock);
53989 +extern rwlock_t gr_inode_lock;
53990 +extern struct acl_subject_label *
53991 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53992 + struct acl_role_label *role);
53993 +
53994 +#ifdef CONFIG_BTRFS_FS
53995 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53996 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53997 +#endif
53998 +
53999 +static inline dev_t __get_dev(const struct dentry *dentry)
54000 +{
54001 +#ifdef CONFIG_BTRFS_FS
54002 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
54003 + return get_btrfs_dev_from_inode(dentry->d_inode);
54004 + else
54005 +#endif
54006 + return dentry->d_inode->i_sb->s_dev;
54007 +}
54008 +
54009 +int
54010 +gr_init_uidset(void)
54011 +{
54012 + uid_set =
54013 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
54014 + uid_used = 0;
54015 +
54016 + return uid_set ? 1 : 0;
54017 +}
54018 +
54019 +void
54020 +gr_free_uidset(void)
54021 +{
54022 + if (uid_set)
54023 + kfree(uid_set);
54024 +
54025 + return;
54026 +}
54027 +
54028 +int
54029 +gr_find_uid(const uid_t uid)
54030 +{
54031 + struct crash_uid *tmp = uid_set;
54032 + uid_t buid;
54033 + int low = 0, high = uid_used - 1, mid;
54034 +
54035 + while (high >= low) {
54036 + mid = (low + high) >> 1;
54037 + buid = tmp[mid].uid;
54038 + if (buid == uid)
54039 + return mid;
54040 + if (buid > uid)
54041 + high = mid - 1;
54042 + if (buid < uid)
54043 + low = mid + 1;
54044 + }
54045 +
54046 + return -1;
54047 +}
54048 +
54049 +static __inline__ void
54050 +gr_insertsort(void)
54051 +{
54052 + unsigned short i, j;
54053 + struct crash_uid index;
54054 +
54055 + for (i = 1; i < uid_used; i++) {
54056 + index = uid_set[i];
54057 + j = i;
54058 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
54059 + uid_set[j] = uid_set[j - 1];
54060 + j--;
54061 + }
54062 + uid_set[j] = index;
54063 + }
54064 +
54065 + return;
54066 +}
54067 +
54068 +static __inline__ void
54069 +gr_insert_uid(const uid_t uid, const unsigned long expires)
54070 +{
54071 + int loc;
54072 +
54073 + if (uid_used == GR_UIDTABLE_MAX)
54074 + return;
54075 +
54076 + loc = gr_find_uid(uid);
54077 +
54078 + if (loc >= 0) {
54079 + uid_set[loc].expires = expires;
54080 + return;
54081 + }
54082 +
54083 + uid_set[uid_used].uid = uid;
54084 + uid_set[uid_used].expires = expires;
54085 + uid_used++;
54086 +
54087 + gr_insertsort();
54088 +
54089 + return;
54090 +}
54091 +
54092 +void
54093 +gr_remove_uid(const unsigned short loc)
54094 +{
54095 + unsigned short i;
54096 +
54097 + for (i = loc + 1; i < uid_used; i++)
54098 + uid_set[i - 1] = uid_set[i];
54099 +
54100 + uid_used--;
54101 +
54102 + return;
54103 +}
54104 +
54105 +int
54106 +gr_check_crash_uid(const uid_t uid)
54107 +{
54108 + int loc;
54109 + int ret = 0;
54110 +
54111 + if (unlikely(!gr_acl_is_enabled()))
54112 + return 0;
54113 +
54114 + spin_lock(&gr_uid_lock);
54115 + loc = gr_find_uid(uid);
54116 +
54117 + if (loc < 0)
54118 + goto out_unlock;
54119 +
54120 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
54121 + gr_remove_uid(loc);
54122 + else
54123 + ret = 1;
54124 +
54125 +out_unlock:
54126 + spin_unlock(&gr_uid_lock);
54127 + return ret;
54128 +}
54129 +
54130 +static __inline__ int
54131 +proc_is_setxid(const struct cred *cred)
54132 +{
54133 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
54134 + cred->uid != cred->fsuid)
54135 + return 1;
54136 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
54137 + cred->gid != cred->fsgid)
54138 + return 1;
54139 +
54140 + return 0;
54141 +}
54142 +
54143 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
54144 +
54145 +void
54146 +gr_handle_crash(struct task_struct *task, const int sig)
54147 +{
54148 + struct acl_subject_label *curr;
54149 + struct task_struct *tsk, *tsk2;
54150 + const struct cred *cred;
54151 + const struct cred *cred2;
54152 +
54153 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
54154 + return;
54155 +
54156 + if (unlikely(!gr_acl_is_enabled()))
54157 + return;
54158 +
54159 + curr = task->acl;
54160 +
54161 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
54162 + return;
54163 +
54164 + if (time_before_eq(curr->expires, get_seconds())) {
54165 + curr->expires = 0;
54166 + curr->crashes = 0;
54167 + }
54168 +
54169 + curr->crashes++;
54170 +
54171 + if (!curr->expires)
54172 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
54173 +
54174 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54175 + time_after(curr->expires, get_seconds())) {
54176 + rcu_read_lock();
54177 + cred = __task_cred(task);
54178 + if (cred->uid && proc_is_setxid(cred)) {
54179 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54180 + spin_lock(&gr_uid_lock);
54181 + gr_insert_uid(cred->uid, curr->expires);
54182 + spin_unlock(&gr_uid_lock);
54183 + curr->expires = 0;
54184 + curr->crashes = 0;
54185 + read_lock(&tasklist_lock);
54186 + do_each_thread(tsk2, tsk) {
54187 + cred2 = __task_cred(tsk);
54188 + if (tsk != task && cred2->uid == cred->uid)
54189 + gr_fake_force_sig(SIGKILL, tsk);
54190 + } while_each_thread(tsk2, tsk);
54191 + read_unlock(&tasklist_lock);
54192 + } else {
54193 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54194 + read_lock(&tasklist_lock);
54195 + read_lock(&grsec_exec_file_lock);
54196 + do_each_thread(tsk2, tsk) {
54197 + if (likely(tsk != task)) {
54198 + // if this thread has the same subject as the one that triggered
54199 + // RES_CRASH and it's the same binary, kill it
54200 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54201 + gr_fake_force_sig(SIGKILL, tsk);
54202 + }
54203 + } while_each_thread(tsk2, tsk);
54204 + read_unlock(&grsec_exec_file_lock);
54205 + read_unlock(&tasklist_lock);
54206 + }
54207 + rcu_read_unlock();
54208 + }
54209 +
54210 + return;
54211 +}
54212 +
54213 +int
54214 +gr_check_crash_exec(const struct file *filp)
54215 +{
54216 + struct acl_subject_label *curr;
54217 +
54218 + if (unlikely(!gr_acl_is_enabled()))
54219 + return 0;
54220 +
54221 + read_lock(&gr_inode_lock);
54222 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54223 + __get_dev(filp->f_path.dentry),
54224 + current->role);
54225 + read_unlock(&gr_inode_lock);
54226 +
54227 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54228 + (!curr->crashes && !curr->expires))
54229 + return 0;
54230 +
54231 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54232 + time_after(curr->expires, get_seconds()))
54233 + return 1;
54234 + else if (time_before_eq(curr->expires, get_seconds())) {
54235 + curr->crashes = 0;
54236 + curr->expires = 0;
54237 + }
54238 +
54239 + return 0;
54240 +}
54241 +
54242 +void
54243 +gr_handle_alertkill(struct task_struct *task)
54244 +{
54245 + struct acl_subject_label *curracl;
54246 + __u32 curr_ip;
54247 + struct task_struct *p, *p2;
54248 +
54249 + if (unlikely(!gr_acl_is_enabled()))
54250 + return;
54251 +
54252 + curracl = task->acl;
54253 + curr_ip = task->signal->curr_ip;
54254 +
54255 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54256 + read_lock(&tasklist_lock);
54257 + do_each_thread(p2, p) {
54258 + if (p->signal->curr_ip == curr_ip)
54259 + gr_fake_force_sig(SIGKILL, p);
54260 + } while_each_thread(p2, p);
54261 + read_unlock(&tasklist_lock);
54262 + } else if (curracl->mode & GR_KILLPROC)
54263 + gr_fake_force_sig(SIGKILL, task);
54264 +
54265 + return;
54266 +}
54267 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54268 new file mode 100644
54269 index 0000000..9d83a69
54270 --- /dev/null
54271 +++ b/grsecurity/gracl_shm.c
54272 @@ -0,0 +1,40 @@
54273 +#include <linux/kernel.h>
54274 +#include <linux/mm.h>
54275 +#include <linux/sched.h>
54276 +#include <linux/file.h>
54277 +#include <linux/ipc.h>
54278 +#include <linux/gracl.h>
54279 +#include <linux/grsecurity.h>
54280 +#include <linux/grinternal.h>
54281 +
54282 +int
54283 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54284 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54285 +{
54286 + struct task_struct *task;
54287 +
54288 + if (!gr_acl_is_enabled())
54289 + return 1;
54290 +
54291 + rcu_read_lock();
54292 + read_lock(&tasklist_lock);
54293 +
54294 + task = find_task_by_vpid(shm_cprid);
54295 +
54296 + if (unlikely(!task))
54297 + task = find_task_by_vpid(shm_lapid);
54298 +
54299 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54300 + (task->pid == shm_lapid)) &&
54301 + (task->acl->mode & GR_PROTSHM) &&
54302 + (task->acl != current->acl))) {
54303 + read_unlock(&tasklist_lock);
54304 + rcu_read_unlock();
54305 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54306 + return 0;
54307 + }
54308 + read_unlock(&tasklist_lock);
54309 + rcu_read_unlock();
54310 +
54311 + return 1;
54312 +}
54313 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54314 new file mode 100644
54315 index 0000000..bc0be01
54316 --- /dev/null
54317 +++ b/grsecurity/grsec_chdir.c
54318 @@ -0,0 +1,19 @@
54319 +#include <linux/kernel.h>
54320 +#include <linux/sched.h>
54321 +#include <linux/fs.h>
54322 +#include <linux/file.h>
54323 +#include <linux/grsecurity.h>
54324 +#include <linux/grinternal.h>
54325 +
54326 +void
54327 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54328 +{
54329 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54330 + if ((grsec_enable_chdir && grsec_enable_group &&
54331 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54332 + !grsec_enable_group)) {
54333 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54334 + }
54335 +#endif
54336 + return;
54337 +}
54338 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54339 new file mode 100644
54340 index 0000000..a2dc675
54341 --- /dev/null
54342 +++ b/grsecurity/grsec_chroot.c
54343 @@ -0,0 +1,351 @@
54344 +#include <linux/kernel.h>
54345 +#include <linux/module.h>
54346 +#include <linux/sched.h>
54347 +#include <linux/file.h>
54348 +#include <linux/fs.h>
54349 +#include <linux/mount.h>
54350 +#include <linux/types.h>
54351 +#include <linux/pid_namespace.h>
54352 +#include <linux/grsecurity.h>
54353 +#include <linux/grinternal.h>
54354 +
54355 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54356 +{
54357 +#ifdef CONFIG_GRKERNSEC
54358 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54359 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54360 + task->gr_is_chrooted = 1;
54361 + else
54362 + task->gr_is_chrooted = 0;
54363 +
54364 + task->gr_chroot_dentry = path->dentry;
54365 +#endif
54366 + return;
54367 +}
54368 +
54369 +void gr_clear_chroot_entries(struct task_struct *task)
54370 +{
54371 +#ifdef CONFIG_GRKERNSEC
54372 + task->gr_is_chrooted = 0;
54373 + task->gr_chroot_dentry = NULL;
54374 +#endif
54375 + return;
54376 +}
54377 +
54378 +int
54379 +gr_handle_chroot_unix(const pid_t pid)
54380 +{
54381 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54382 + struct task_struct *p;
54383 +
54384 + if (unlikely(!grsec_enable_chroot_unix))
54385 + return 1;
54386 +
54387 + if (likely(!proc_is_chrooted(current)))
54388 + return 1;
54389 +
54390 + rcu_read_lock();
54391 + read_lock(&tasklist_lock);
54392 + p = find_task_by_vpid_unrestricted(pid);
54393 + if (unlikely(p && !have_same_root(current, p))) {
54394 + read_unlock(&tasklist_lock);
54395 + rcu_read_unlock();
54396 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54397 + return 0;
54398 + }
54399 + read_unlock(&tasklist_lock);
54400 + rcu_read_unlock();
54401 +#endif
54402 + return 1;
54403 +}
54404 +
54405 +int
54406 +gr_handle_chroot_nice(void)
54407 +{
54408 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54409 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54410 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54411 + return -EPERM;
54412 + }
54413 +#endif
54414 + return 0;
54415 +}
54416 +
54417 +int
54418 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54419 +{
54420 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54421 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54422 + && proc_is_chrooted(current)) {
54423 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54424 + return -EACCES;
54425 + }
54426 +#endif
54427 + return 0;
54428 +}
54429 +
54430 +int
54431 +gr_handle_chroot_rawio(const struct inode *inode)
54432 +{
54433 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54434 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54435 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54436 + return 1;
54437 +#endif
54438 + return 0;
54439 +}
54440 +
54441 +int
54442 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54443 +{
54444 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54445 + struct task_struct *p;
54446 + int ret = 0;
54447 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54448 + return ret;
54449 +
54450 + read_lock(&tasklist_lock);
54451 + do_each_pid_task(pid, type, p) {
54452 + if (!have_same_root(current, p)) {
54453 + ret = 1;
54454 + goto out;
54455 + }
54456 + } while_each_pid_task(pid, type, p);
54457 +out:
54458 + read_unlock(&tasklist_lock);
54459 + return ret;
54460 +#endif
54461 + return 0;
54462 +}
54463 +
54464 +int
54465 +gr_pid_is_chrooted(struct task_struct *p)
54466 +{
54467 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54468 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54469 + return 0;
54470 +
54471 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54472 + !have_same_root(current, p)) {
54473 + return 1;
54474 + }
54475 +#endif
54476 + return 0;
54477 +}
54478 +
54479 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54480 +
54481 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54482 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54483 +{
54484 + struct path path, currentroot;
54485 + int ret = 0;
54486 +
54487 + path.dentry = (struct dentry *)u_dentry;
54488 + path.mnt = (struct vfsmount *)u_mnt;
54489 + get_fs_root(current->fs, &currentroot);
54490 + if (path_is_under(&path, &currentroot))
54491 + ret = 1;
54492 + path_put(&currentroot);
54493 +
54494 + return ret;
54495 +}
54496 +#endif
54497 +
54498 +int
54499 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54500 +{
54501 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54502 + if (!grsec_enable_chroot_fchdir)
54503 + return 1;
54504 +
54505 + if (!proc_is_chrooted(current))
54506 + return 1;
54507 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54508 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54509 + return 0;
54510 + }
54511 +#endif
54512 + return 1;
54513 +}
54514 +
54515 +int
54516 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54517 + const time_t shm_createtime)
54518 +{
54519 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54520 + struct task_struct *p;
54521 + time_t starttime;
54522 +
54523 + if (unlikely(!grsec_enable_chroot_shmat))
54524 + return 1;
54525 +
54526 + if (likely(!proc_is_chrooted(current)))
54527 + return 1;
54528 +
54529 + rcu_read_lock();
54530 + read_lock(&tasklist_lock);
54531 +
54532 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54533 + starttime = p->start_time.tv_sec;
54534 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54535 + if (have_same_root(current, p)) {
54536 + goto allow;
54537 + } else {
54538 + read_unlock(&tasklist_lock);
54539 + rcu_read_unlock();
54540 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54541 + return 0;
54542 + }
54543 + }
54544 + /* creator exited, pid reuse, fall through to next check */
54545 + }
54546 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54547 + if (unlikely(!have_same_root(current, p))) {
54548 + read_unlock(&tasklist_lock);
54549 + rcu_read_unlock();
54550 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54551 + return 0;
54552 + }
54553 + }
54554 +
54555 +allow:
54556 + read_unlock(&tasklist_lock);
54557 + rcu_read_unlock();
54558 +#endif
54559 + return 1;
54560 +}
54561 +
54562 +void
54563 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54564 +{
54565 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54566 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54567 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54568 +#endif
54569 + return;
54570 +}
54571 +
54572 +int
54573 +gr_handle_chroot_mknod(const struct dentry *dentry,
54574 + const struct vfsmount *mnt, const int mode)
54575 +{
54576 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54577 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54578 + proc_is_chrooted(current)) {
54579 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54580 + return -EPERM;
54581 + }
54582 +#endif
54583 + return 0;
54584 +}
54585 +
54586 +int
54587 +gr_handle_chroot_mount(const struct dentry *dentry,
54588 + const struct vfsmount *mnt, const char *dev_name)
54589 +{
54590 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54591 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54592 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54593 + return -EPERM;
54594 + }
54595 +#endif
54596 + return 0;
54597 +}
54598 +
54599 +int
54600 +gr_handle_chroot_pivot(void)
54601 +{
54602 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54603 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54604 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54605 + return -EPERM;
54606 + }
54607 +#endif
54608 + return 0;
54609 +}
54610 +
54611 +int
54612 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54613 +{
54614 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54615 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54616 + !gr_is_outside_chroot(dentry, mnt)) {
54617 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54618 + return -EPERM;
54619 + }
54620 +#endif
54621 + return 0;
54622 +}
54623 +
54624 +extern const char *captab_log[];
54625 +extern int captab_log_entries;
54626 +
54627 +int
54628 +gr_chroot_is_capable(const int cap)
54629 +{
54630 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54631 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54632 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54633 + if (cap_raised(chroot_caps, cap)) {
54634 + const struct cred *creds = current_cred();
54635 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54636 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54637 + }
54638 + return 0;
54639 + }
54640 + }
54641 +#endif
54642 + return 1;
54643 +}
54644 +
54645 +int
54646 +gr_chroot_is_capable_nolog(const int cap)
54647 +{
54648 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54649 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54650 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54651 + if (cap_raised(chroot_caps, cap)) {
54652 + return 0;
54653 + }
54654 + }
54655 +#endif
54656 + return 1;
54657 +}
54658 +
54659 +int
54660 +gr_handle_chroot_sysctl(const int op)
54661 +{
54662 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54663 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54664 + proc_is_chrooted(current))
54665 + return -EACCES;
54666 +#endif
54667 + return 0;
54668 +}
54669 +
54670 +void
54671 +gr_handle_chroot_chdir(struct path *path)
54672 +{
54673 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54674 + if (grsec_enable_chroot_chdir)
54675 + set_fs_pwd(current->fs, path);
54676 +#endif
54677 + return;
54678 +}
54679 +
54680 +int
54681 +gr_handle_chroot_chmod(const struct dentry *dentry,
54682 + const struct vfsmount *mnt, const int mode)
54683 +{
54684 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54685 + /* allow chmod +s on directories, but not files */
54686 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54687 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54688 + proc_is_chrooted(current)) {
54689 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54690 + return -EPERM;
54691 + }
54692 +#endif
54693 + return 0;
54694 +}
54695 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54696 new file mode 100644
54697 index 0000000..213ad8b
54698 --- /dev/null
54699 +++ b/grsecurity/grsec_disabled.c
54700 @@ -0,0 +1,437 @@
54701 +#include <linux/kernel.h>
54702 +#include <linux/module.h>
54703 +#include <linux/sched.h>
54704 +#include <linux/file.h>
54705 +#include <linux/fs.h>
54706 +#include <linux/kdev_t.h>
54707 +#include <linux/net.h>
54708 +#include <linux/in.h>
54709 +#include <linux/ip.h>
54710 +#include <linux/skbuff.h>
54711 +#include <linux/sysctl.h>
54712 +
54713 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54714 +void
54715 +pax_set_initial_flags(struct linux_binprm *bprm)
54716 +{
54717 + return;
54718 +}
54719 +#endif
54720 +
54721 +#ifdef CONFIG_SYSCTL
54722 +__u32
54723 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54724 +{
54725 + return 0;
54726 +}
54727 +#endif
54728 +
54729 +#ifdef CONFIG_TASKSTATS
54730 +int gr_is_taskstats_denied(int pid)
54731 +{
54732 + return 0;
54733 +}
54734 +#endif
54735 +
54736 +int
54737 +gr_acl_is_enabled(void)
54738 +{
54739 + return 0;
54740 +}
54741 +
54742 +void
54743 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54744 +{
54745 + return;
54746 +}
54747 +
54748 +int
54749 +gr_handle_rawio(const struct inode *inode)
54750 +{
54751 + return 0;
54752 +}
54753 +
54754 +void
54755 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54756 +{
54757 + return;
54758 +}
54759 +
54760 +int
54761 +gr_handle_ptrace(struct task_struct *task, const long request)
54762 +{
54763 + return 0;
54764 +}
54765 +
54766 +int
54767 +gr_handle_proc_ptrace(struct task_struct *task)
54768 +{
54769 + return 0;
54770 +}
54771 +
54772 +void
54773 +gr_learn_resource(const struct task_struct *task,
54774 + const int res, const unsigned long wanted, const int gt)
54775 +{
54776 + return;
54777 +}
54778 +
54779 +int
54780 +gr_set_acls(const int type)
54781 +{
54782 + return 0;
54783 +}
54784 +
54785 +int
54786 +gr_check_hidden_task(const struct task_struct *tsk)
54787 +{
54788 + return 0;
54789 +}
54790 +
54791 +int
54792 +gr_check_protected_task(const struct task_struct *task)
54793 +{
54794 + return 0;
54795 +}
54796 +
54797 +int
54798 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54799 +{
54800 + return 0;
54801 +}
54802 +
54803 +void
54804 +gr_copy_label(struct task_struct *tsk)
54805 +{
54806 + return;
54807 +}
54808 +
54809 +void
54810 +gr_set_pax_flags(struct task_struct *task)
54811 +{
54812 + return;
54813 +}
54814 +
54815 +int
54816 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54817 + const int unsafe_share)
54818 +{
54819 + return 0;
54820 +}
54821 +
54822 +void
54823 +gr_handle_delete(const ino_t ino, const dev_t dev)
54824 +{
54825 + return;
54826 +}
54827 +
54828 +void
54829 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54830 +{
54831 + return;
54832 +}
54833 +
54834 +void
54835 +gr_handle_crash(struct task_struct *task, const int sig)
54836 +{
54837 + return;
54838 +}
54839 +
54840 +int
54841 +gr_check_crash_exec(const struct file *filp)
54842 +{
54843 + return 0;
54844 +}
54845 +
54846 +int
54847 +gr_check_crash_uid(const uid_t uid)
54848 +{
54849 + return 0;
54850 +}
54851 +
54852 +void
54853 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54854 + struct dentry *old_dentry,
54855 + struct dentry *new_dentry,
54856 + struct vfsmount *mnt, const __u8 replace)
54857 +{
54858 + return;
54859 +}
54860 +
54861 +int
54862 +gr_search_socket(const int family, const int type, const int protocol)
54863 +{
54864 + return 1;
54865 +}
54866 +
54867 +int
54868 +gr_search_connectbind(const int mode, const struct socket *sock,
54869 + const struct sockaddr_in *addr)
54870 +{
54871 + return 0;
54872 +}
54873 +
54874 +void
54875 +gr_handle_alertkill(struct task_struct *task)
54876 +{
54877 + return;
54878 +}
54879 +
54880 +__u32
54881 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54882 +{
54883 + return 1;
54884 +}
54885 +
54886 +__u32
54887 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54888 + const struct vfsmount * mnt)
54889 +{
54890 + return 1;
54891 +}
54892 +
54893 +__u32
54894 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54895 + int acc_mode)
54896 +{
54897 + return 1;
54898 +}
54899 +
54900 +__u32
54901 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54902 +{
54903 + return 1;
54904 +}
54905 +
54906 +__u32
54907 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54908 +{
54909 + return 1;
54910 +}
54911 +
54912 +int
54913 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54914 + unsigned int *vm_flags)
54915 +{
54916 + return 1;
54917 +}
54918 +
54919 +__u32
54920 +gr_acl_handle_truncate(const struct dentry * dentry,
54921 + const struct vfsmount * mnt)
54922 +{
54923 + return 1;
54924 +}
54925 +
54926 +__u32
54927 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54928 +{
54929 + return 1;
54930 +}
54931 +
54932 +__u32
54933 +gr_acl_handle_access(const struct dentry * dentry,
54934 + const struct vfsmount * mnt, const int fmode)
54935 +{
54936 + return 1;
54937 +}
54938 +
54939 +__u32
54940 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54941 + umode_t *mode)
54942 +{
54943 + return 1;
54944 +}
54945 +
54946 +__u32
54947 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54948 +{
54949 + return 1;
54950 +}
54951 +
54952 +__u32
54953 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54954 +{
54955 + return 1;
54956 +}
54957 +
54958 +void
54959 +grsecurity_init(void)
54960 +{
54961 + return;
54962 +}
54963 +
54964 +umode_t gr_acl_umask(void)
54965 +{
54966 + return 0;
54967 +}
54968 +
54969 +__u32
54970 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54971 + const struct dentry * parent_dentry,
54972 + const struct vfsmount * parent_mnt,
54973 + const int mode)
54974 +{
54975 + return 1;
54976 +}
54977 +
54978 +__u32
54979 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54980 + const struct dentry * parent_dentry,
54981 + const struct vfsmount * parent_mnt)
54982 +{
54983 + return 1;
54984 +}
54985 +
54986 +__u32
54987 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54988 + const struct dentry * parent_dentry,
54989 + const struct vfsmount * parent_mnt, const char *from)
54990 +{
54991 + return 1;
54992 +}
54993 +
54994 +__u32
54995 +gr_acl_handle_link(const struct dentry * new_dentry,
54996 + const struct dentry * parent_dentry,
54997 + const struct vfsmount * parent_mnt,
54998 + const struct dentry * old_dentry,
54999 + const struct vfsmount * old_mnt, const char *to)
55000 +{
55001 + return 1;
55002 +}
55003 +
55004 +int
55005 +gr_acl_handle_rename(const struct dentry *new_dentry,
55006 + const struct dentry *parent_dentry,
55007 + const struct vfsmount *parent_mnt,
55008 + const struct dentry *old_dentry,
55009 + const struct inode *old_parent_inode,
55010 + const struct vfsmount *old_mnt, const char *newname)
55011 +{
55012 + return 0;
55013 +}
55014 +
55015 +int
55016 +gr_acl_handle_filldir(const struct file *file, const char *name,
55017 + const int namelen, const ino_t ino)
55018 +{
55019 + return 1;
55020 +}
55021 +
55022 +int
55023 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55024 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55025 +{
55026 + return 1;
55027 +}
55028 +
55029 +int
55030 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
55031 +{
55032 + return 0;
55033 +}
55034 +
55035 +int
55036 +gr_search_accept(const struct socket *sock)
55037 +{
55038 + return 0;
55039 +}
55040 +
55041 +int
55042 +gr_search_listen(const struct socket *sock)
55043 +{
55044 + return 0;
55045 +}
55046 +
55047 +int
55048 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
55049 +{
55050 + return 0;
55051 +}
55052 +
55053 +__u32
55054 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
55055 +{
55056 + return 1;
55057 +}
55058 +
55059 +__u32
55060 +gr_acl_handle_creat(const struct dentry * dentry,
55061 + const struct dentry * p_dentry,
55062 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55063 + const int imode)
55064 +{
55065 + return 1;
55066 +}
55067 +
55068 +void
55069 +gr_acl_handle_exit(void)
55070 +{
55071 + return;
55072 +}
55073 +
55074 +int
55075 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55076 +{
55077 + return 1;
55078 +}
55079 +
55080 +void
55081 +gr_set_role_label(const uid_t uid, const gid_t gid)
55082 +{
55083 + return;
55084 +}
55085 +
55086 +int
55087 +gr_acl_handle_procpidmem(const struct task_struct *task)
55088 +{
55089 + return 0;
55090 +}
55091 +
55092 +int
55093 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
55094 +{
55095 + return 0;
55096 +}
55097 +
55098 +int
55099 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
55100 +{
55101 + return 0;
55102 +}
55103 +
55104 +void
55105 +gr_set_kernel_label(struct task_struct *task)
55106 +{
55107 + return;
55108 +}
55109 +
55110 +int
55111 +gr_check_user_change(int real, int effective, int fs)
55112 +{
55113 + return 0;
55114 +}
55115 +
55116 +int
55117 +gr_check_group_change(int real, int effective, int fs)
55118 +{
55119 + return 0;
55120 +}
55121 +
55122 +int gr_acl_enable_at_secure(void)
55123 +{
55124 + return 0;
55125 +}
55126 +
55127 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55128 +{
55129 + return dentry->d_inode->i_sb->s_dev;
55130 +}
55131 +
55132 +EXPORT_SYMBOL(gr_learn_resource);
55133 +EXPORT_SYMBOL(gr_set_kernel_label);
55134 +#ifdef CONFIG_SECURITY
55135 +EXPORT_SYMBOL(gr_check_user_change);
55136 +EXPORT_SYMBOL(gr_check_group_change);
55137 +#endif
55138 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
55139 new file mode 100644
55140 index 0000000..2b05ada
55141 --- /dev/null
55142 +++ b/grsecurity/grsec_exec.c
55143 @@ -0,0 +1,146 @@
55144 +#include <linux/kernel.h>
55145 +#include <linux/sched.h>
55146 +#include <linux/file.h>
55147 +#include <linux/binfmts.h>
55148 +#include <linux/fs.h>
55149 +#include <linux/types.h>
55150 +#include <linux/grdefs.h>
55151 +#include <linux/grsecurity.h>
55152 +#include <linux/grinternal.h>
55153 +#include <linux/capability.h>
55154 +#include <linux/module.h>
55155 +
55156 +#include <asm/uaccess.h>
55157 +
55158 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55159 +static char gr_exec_arg_buf[132];
55160 +static DEFINE_MUTEX(gr_exec_arg_mutex);
55161 +#endif
55162 +
55163 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
55164 +
55165 +void
55166 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
55167 +{
55168 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55169 + char *grarg = gr_exec_arg_buf;
55170 + unsigned int i, x, execlen = 0;
55171 + char c;
55172 +
55173 + if (!((grsec_enable_execlog && grsec_enable_group &&
55174 + in_group_p(grsec_audit_gid))
55175 + || (grsec_enable_execlog && !grsec_enable_group)))
55176 + return;
55177 +
55178 + mutex_lock(&gr_exec_arg_mutex);
55179 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
55180 +
55181 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
55182 + const char __user *p;
55183 + unsigned int len;
55184 +
55185 + p = get_user_arg_ptr(argv, i);
55186 + if (IS_ERR(p))
55187 + goto log;
55188 +
55189 + len = strnlen_user(p, 128 - execlen);
55190 + if (len > 128 - execlen)
55191 + len = 128 - execlen;
55192 + else if (len > 0)
55193 + len--;
55194 + if (copy_from_user(grarg + execlen, p, len))
55195 + goto log;
55196 +
55197 + /* rewrite unprintable characters */
55198 + for (x = 0; x < len; x++) {
55199 + c = *(grarg + execlen + x);
55200 + if (c < 32 || c > 126)
55201 + *(grarg + execlen + x) = ' ';
55202 + }
55203 +
55204 + execlen += len;
55205 + *(grarg + execlen) = ' ';
55206 + *(grarg + execlen + 1) = '\0';
55207 + execlen++;
55208 + }
55209 +
55210 + log:
55211 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55212 + bprm->file->f_path.mnt, grarg);
55213 + mutex_unlock(&gr_exec_arg_mutex);
55214 +#endif
55215 + return;
55216 +}
55217 +
55218 +#ifdef CONFIG_GRKERNSEC
55219 +extern int gr_acl_is_capable(const int cap);
55220 +extern int gr_acl_is_capable_nolog(const int cap);
55221 +extern int gr_chroot_is_capable(const int cap);
55222 +extern int gr_chroot_is_capable_nolog(const int cap);
55223 +#endif
55224 +
55225 +const char *captab_log[] = {
55226 + "CAP_CHOWN",
55227 + "CAP_DAC_OVERRIDE",
55228 + "CAP_DAC_READ_SEARCH",
55229 + "CAP_FOWNER",
55230 + "CAP_FSETID",
55231 + "CAP_KILL",
55232 + "CAP_SETGID",
55233 + "CAP_SETUID",
55234 + "CAP_SETPCAP",
55235 + "CAP_LINUX_IMMUTABLE",
55236 + "CAP_NET_BIND_SERVICE",
55237 + "CAP_NET_BROADCAST",
55238 + "CAP_NET_ADMIN",
55239 + "CAP_NET_RAW",
55240 + "CAP_IPC_LOCK",
55241 + "CAP_IPC_OWNER",
55242 + "CAP_SYS_MODULE",
55243 + "CAP_SYS_RAWIO",
55244 + "CAP_SYS_CHROOT",
55245 + "CAP_SYS_PTRACE",
55246 + "CAP_SYS_PACCT",
55247 + "CAP_SYS_ADMIN",
55248 + "CAP_SYS_BOOT",
55249 + "CAP_SYS_NICE",
55250 + "CAP_SYS_RESOURCE",
55251 + "CAP_SYS_TIME",
55252 + "CAP_SYS_TTY_CONFIG",
55253 + "CAP_MKNOD",
55254 + "CAP_LEASE",
55255 + "CAP_AUDIT_WRITE",
55256 + "CAP_AUDIT_CONTROL",
55257 + "CAP_SETFCAP",
55258 + "CAP_MAC_OVERRIDE",
55259 + "CAP_MAC_ADMIN",
55260 + "CAP_SYSLOG",
55261 + "CAP_WAKE_ALARM"
55262 +};
55263 +
55264 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55265 +
55266 +int gr_is_capable(const int cap)
55267 +{
55268 +#ifdef CONFIG_GRKERNSEC
55269 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55270 + return 1;
55271 + return 0;
55272 +#else
55273 + return 1;
55274 +#endif
55275 +}
55276 +
55277 +int gr_is_capable_nolog(const int cap)
55278 +{
55279 +#ifdef CONFIG_GRKERNSEC
55280 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55281 + return 1;
55282 + return 0;
55283 +#else
55284 + return 1;
55285 +#endif
55286 +}
55287 +
55288 +EXPORT_SYMBOL(gr_is_capable);
55289 +EXPORT_SYMBOL(gr_is_capable_nolog);
55290 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55291 new file mode 100644
55292 index 0000000..d3ee748
55293 --- /dev/null
55294 +++ b/grsecurity/grsec_fifo.c
55295 @@ -0,0 +1,24 @@
55296 +#include <linux/kernel.h>
55297 +#include <linux/sched.h>
55298 +#include <linux/fs.h>
55299 +#include <linux/file.h>
55300 +#include <linux/grinternal.h>
55301 +
55302 +int
55303 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55304 + const struct dentry *dir, const int flag, const int acc_mode)
55305 +{
55306 +#ifdef CONFIG_GRKERNSEC_FIFO
55307 + const struct cred *cred = current_cred();
55308 +
55309 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55310 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55311 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55312 + (cred->fsuid != dentry->d_inode->i_uid)) {
55313 + if (!inode_permission(dentry->d_inode, acc_mode))
55314 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55315 + return -EACCES;
55316 + }
55317 +#endif
55318 + return 0;
55319 +}
55320 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55321 new file mode 100644
55322 index 0000000..8ca18bf
55323 --- /dev/null
55324 +++ b/grsecurity/grsec_fork.c
55325 @@ -0,0 +1,23 @@
55326 +#include <linux/kernel.h>
55327 +#include <linux/sched.h>
55328 +#include <linux/grsecurity.h>
55329 +#include <linux/grinternal.h>
55330 +#include <linux/errno.h>
55331 +
55332 +void
55333 +gr_log_forkfail(const int retval)
55334 +{
55335 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55336 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55337 + switch (retval) {
55338 + case -EAGAIN:
55339 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55340 + break;
55341 + case -ENOMEM:
55342 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55343 + break;
55344 + }
55345 + }
55346 +#endif
55347 + return;
55348 +}
55349 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55350 new file mode 100644
55351 index 0000000..01ddde4
55352 --- /dev/null
55353 +++ b/grsecurity/grsec_init.c
55354 @@ -0,0 +1,277 @@
55355 +#include <linux/kernel.h>
55356 +#include <linux/sched.h>
55357 +#include <linux/mm.h>
55358 +#include <linux/gracl.h>
55359 +#include <linux/slab.h>
55360 +#include <linux/vmalloc.h>
55361 +#include <linux/percpu.h>
55362 +#include <linux/module.h>
55363 +
55364 +int grsec_enable_ptrace_readexec;
55365 +int grsec_enable_setxid;
55366 +int grsec_enable_brute;
55367 +int grsec_enable_link;
55368 +int grsec_enable_dmesg;
55369 +int grsec_enable_harden_ptrace;
55370 +int grsec_enable_fifo;
55371 +int grsec_enable_execlog;
55372 +int grsec_enable_signal;
55373 +int grsec_enable_forkfail;
55374 +int grsec_enable_audit_ptrace;
55375 +int grsec_enable_time;
55376 +int grsec_enable_audit_textrel;
55377 +int grsec_enable_group;
55378 +int grsec_audit_gid;
55379 +int grsec_enable_chdir;
55380 +int grsec_enable_mount;
55381 +int grsec_enable_rofs;
55382 +int grsec_enable_chroot_findtask;
55383 +int grsec_enable_chroot_mount;
55384 +int grsec_enable_chroot_shmat;
55385 +int grsec_enable_chroot_fchdir;
55386 +int grsec_enable_chroot_double;
55387 +int grsec_enable_chroot_pivot;
55388 +int grsec_enable_chroot_chdir;
55389 +int grsec_enable_chroot_chmod;
55390 +int grsec_enable_chroot_mknod;
55391 +int grsec_enable_chroot_nice;
55392 +int grsec_enable_chroot_execlog;
55393 +int grsec_enable_chroot_caps;
55394 +int grsec_enable_chroot_sysctl;
55395 +int grsec_enable_chroot_unix;
55396 +int grsec_enable_tpe;
55397 +int grsec_tpe_gid;
55398 +int grsec_enable_blackhole;
55399 +#ifdef CONFIG_IPV6_MODULE
55400 +EXPORT_SYMBOL(grsec_enable_blackhole);
55401 +#endif
55402 +int grsec_lastack_retries;
55403 +int grsec_enable_tpe_all;
55404 +int grsec_enable_tpe_invert;
55405 +int grsec_enable_socket_all;
55406 +int grsec_socket_all_gid;
55407 +int grsec_enable_socket_client;
55408 +int grsec_socket_client_gid;
55409 +int grsec_enable_socket_server;
55410 +int grsec_socket_server_gid;
55411 +int grsec_resource_logging;
55412 +int grsec_disable_privio;
55413 +int grsec_enable_log_rwxmaps;
55414 +int grsec_lock;
55415 +
55416 +DEFINE_SPINLOCK(grsec_alert_lock);
55417 +unsigned long grsec_alert_wtime = 0;
55418 +unsigned long grsec_alert_fyet = 0;
55419 +
55420 +DEFINE_SPINLOCK(grsec_audit_lock);
55421 +
55422 +DEFINE_RWLOCK(grsec_exec_file_lock);
55423 +
55424 +char *gr_shared_page[4];
55425 +
55426 +char *gr_alert_log_fmt;
55427 +char *gr_audit_log_fmt;
55428 +char *gr_alert_log_buf;
55429 +char *gr_audit_log_buf;
55430 +
55431 +extern struct gr_arg *gr_usermode;
55432 +extern unsigned char *gr_system_salt;
55433 +extern unsigned char *gr_system_sum;
55434 +
55435 +void __init
55436 +grsecurity_init(void)
55437 +{
55438 + int j;
55439 + /* create the per-cpu shared pages */
55440 +
55441 +#ifdef CONFIG_X86
55442 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55443 +#endif
55444 +
55445 + for (j = 0; j < 4; j++) {
55446 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55447 + if (gr_shared_page[j] == NULL) {
55448 + panic("Unable to allocate grsecurity shared page");
55449 + return;
55450 + }
55451 + }
55452 +
55453 + /* allocate log buffers */
55454 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55455 + if (!gr_alert_log_fmt) {
55456 + panic("Unable to allocate grsecurity alert log format buffer");
55457 + return;
55458 + }
55459 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55460 + if (!gr_audit_log_fmt) {
55461 + panic("Unable to allocate grsecurity audit log format buffer");
55462 + return;
55463 + }
55464 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55465 + if (!gr_alert_log_buf) {
55466 + panic("Unable to allocate grsecurity alert log buffer");
55467 + return;
55468 + }
55469 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55470 + if (!gr_audit_log_buf) {
55471 + panic("Unable to allocate grsecurity audit log buffer");
55472 + return;
55473 + }
55474 +
55475 + /* allocate memory for authentication structure */
55476 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55477 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55478 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55479 +
55480 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55481 + panic("Unable to allocate grsecurity authentication structure");
55482 + return;
55483 + }
55484 +
55485 +
55486 +#ifdef CONFIG_GRKERNSEC_IO
55487 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55488 + grsec_disable_privio = 1;
55489 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55490 + grsec_disable_privio = 1;
55491 +#else
55492 + grsec_disable_privio = 0;
55493 +#endif
55494 +#endif
55495 +
55496 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55497 + /* for backward compatibility, tpe_invert always defaults to on if
55498 + enabled in the kernel
55499 + */
55500 + grsec_enable_tpe_invert = 1;
55501 +#endif
55502 +
55503 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55504 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55505 + grsec_lock = 1;
55506 +#endif
55507 +
55508 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55509 + grsec_enable_audit_textrel = 1;
55510 +#endif
55511 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55512 + grsec_enable_log_rwxmaps = 1;
55513 +#endif
55514 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55515 + grsec_enable_group = 1;
55516 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55517 +#endif
55518 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55519 + grsec_enable_ptrace_readexec = 1;
55520 +#endif
55521 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55522 + grsec_enable_chdir = 1;
55523 +#endif
55524 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55525 + grsec_enable_harden_ptrace = 1;
55526 +#endif
55527 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55528 + grsec_enable_mount = 1;
55529 +#endif
55530 +#ifdef CONFIG_GRKERNSEC_LINK
55531 + grsec_enable_link = 1;
55532 +#endif
55533 +#ifdef CONFIG_GRKERNSEC_BRUTE
55534 + grsec_enable_brute = 1;
55535 +#endif
55536 +#ifdef CONFIG_GRKERNSEC_DMESG
55537 + grsec_enable_dmesg = 1;
55538 +#endif
55539 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55540 + grsec_enable_blackhole = 1;
55541 + grsec_lastack_retries = 4;
55542 +#endif
55543 +#ifdef CONFIG_GRKERNSEC_FIFO
55544 + grsec_enable_fifo = 1;
55545 +#endif
55546 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55547 + grsec_enable_execlog = 1;
55548 +#endif
55549 +#ifdef CONFIG_GRKERNSEC_SETXID
55550 + grsec_enable_setxid = 1;
55551 +#endif
55552 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55553 + grsec_enable_signal = 1;
55554 +#endif
55555 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55556 + grsec_enable_forkfail = 1;
55557 +#endif
55558 +#ifdef CONFIG_GRKERNSEC_TIME
55559 + grsec_enable_time = 1;
55560 +#endif
55561 +#ifdef CONFIG_GRKERNSEC_RESLOG
55562 + grsec_resource_logging = 1;
55563 +#endif
55564 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55565 + grsec_enable_chroot_findtask = 1;
55566 +#endif
55567 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55568 + grsec_enable_chroot_unix = 1;
55569 +#endif
55570 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55571 + grsec_enable_chroot_mount = 1;
55572 +#endif
55573 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55574 + grsec_enable_chroot_fchdir = 1;
55575 +#endif
55576 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55577 + grsec_enable_chroot_shmat = 1;
55578 +#endif
55579 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55580 + grsec_enable_audit_ptrace = 1;
55581 +#endif
55582 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55583 + grsec_enable_chroot_double = 1;
55584 +#endif
55585 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55586 + grsec_enable_chroot_pivot = 1;
55587 +#endif
55588 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55589 + grsec_enable_chroot_chdir = 1;
55590 +#endif
55591 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55592 + grsec_enable_chroot_chmod = 1;
55593 +#endif
55594 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55595 + grsec_enable_chroot_mknod = 1;
55596 +#endif
55597 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55598 + grsec_enable_chroot_nice = 1;
55599 +#endif
55600 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55601 + grsec_enable_chroot_execlog = 1;
55602 +#endif
55603 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55604 + grsec_enable_chroot_caps = 1;
55605 +#endif
55606 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55607 + grsec_enable_chroot_sysctl = 1;
55608 +#endif
55609 +#ifdef CONFIG_GRKERNSEC_TPE
55610 + grsec_enable_tpe = 1;
55611 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55612 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55613 + grsec_enable_tpe_all = 1;
55614 +#endif
55615 +#endif
55616 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55617 + grsec_enable_socket_all = 1;
55618 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55619 +#endif
55620 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55621 + grsec_enable_socket_client = 1;
55622 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55623 +#endif
55624 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55625 + grsec_enable_socket_server = 1;
55626 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55627 +#endif
55628 +#endif
55629 +
55630 + return;
55631 +}
55632 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55633 new file mode 100644
55634 index 0000000..3efe141
55635 --- /dev/null
55636 +++ b/grsecurity/grsec_link.c
55637 @@ -0,0 +1,43 @@
55638 +#include <linux/kernel.h>
55639 +#include <linux/sched.h>
55640 +#include <linux/fs.h>
55641 +#include <linux/file.h>
55642 +#include <linux/grinternal.h>
55643 +
55644 +int
55645 +gr_handle_follow_link(const struct inode *parent,
55646 + const struct inode *inode,
55647 + const struct dentry *dentry, const struct vfsmount *mnt)
55648 +{
55649 +#ifdef CONFIG_GRKERNSEC_LINK
55650 + const struct cred *cred = current_cred();
55651 +
55652 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55653 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55654 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55655 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55656 + return -EACCES;
55657 + }
55658 +#endif
55659 + return 0;
55660 +}
55661 +
55662 +int
55663 +gr_handle_hardlink(const struct dentry *dentry,
55664 + const struct vfsmount *mnt,
55665 + struct inode *inode, const int mode, const char *to)
55666 +{
55667 +#ifdef CONFIG_GRKERNSEC_LINK
55668 + const struct cred *cred = current_cred();
55669 +
55670 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55671 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55672 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55673 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55674 + !capable(CAP_FOWNER) && cred->uid) {
55675 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55676 + return -EPERM;
55677 + }
55678 +#endif
55679 + return 0;
55680 +}
55681 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55682 new file mode 100644
55683 index 0000000..a45d2e9
55684 --- /dev/null
55685 +++ b/grsecurity/grsec_log.c
55686 @@ -0,0 +1,322 @@
55687 +#include <linux/kernel.h>
55688 +#include <linux/sched.h>
55689 +#include <linux/file.h>
55690 +#include <linux/tty.h>
55691 +#include <linux/fs.h>
55692 +#include <linux/grinternal.h>
55693 +
55694 +#ifdef CONFIG_TREE_PREEMPT_RCU
55695 +#define DISABLE_PREEMPT() preempt_disable()
55696 +#define ENABLE_PREEMPT() preempt_enable()
55697 +#else
55698 +#define DISABLE_PREEMPT()
55699 +#define ENABLE_PREEMPT()
55700 +#endif
55701 +
55702 +#define BEGIN_LOCKS(x) \
55703 + DISABLE_PREEMPT(); \
55704 + rcu_read_lock(); \
55705 + read_lock(&tasklist_lock); \
55706 + read_lock(&grsec_exec_file_lock); \
55707 + if (x != GR_DO_AUDIT) \
55708 + spin_lock(&grsec_alert_lock); \
55709 + else \
55710 + spin_lock(&grsec_audit_lock)
55711 +
55712 +#define END_LOCKS(x) \
55713 + if (x != GR_DO_AUDIT) \
55714 + spin_unlock(&grsec_alert_lock); \
55715 + else \
55716 + spin_unlock(&grsec_audit_lock); \
55717 + read_unlock(&grsec_exec_file_lock); \
55718 + read_unlock(&tasklist_lock); \
55719 + rcu_read_unlock(); \
55720 + ENABLE_PREEMPT(); \
55721 + if (x == GR_DONT_AUDIT) \
55722 + gr_handle_alertkill(current)
55723 +
55724 +enum {
55725 + FLOODING,
55726 + NO_FLOODING
55727 +};
55728 +
55729 +extern char *gr_alert_log_fmt;
55730 +extern char *gr_audit_log_fmt;
55731 +extern char *gr_alert_log_buf;
55732 +extern char *gr_audit_log_buf;
55733 +
55734 +static int gr_log_start(int audit)
55735 +{
55736 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55737 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55738 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55739 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55740 + unsigned long curr_secs = get_seconds();
55741 +
55742 + if (audit == GR_DO_AUDIT)
55743 + goto set_fmt;
55744 +
55745 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55746 + grsec_alert_wtime = curr_secs;
55747 + grsec_alert_fyet = 0;
55748 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55749 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55750 + grsec_alert_fyet++;
55751 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55752 + grsec_alert_wtime = curr_secs;
55753 + grsec_alert_fyet++;
55754 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55755 + return FLOODING;
55756 + }
55757 + else return FLOODING;
55758 +
55759 +set_fmt:
55760 +#endif
55761 + memset(buf, 0, PAGE_SIZE);
55762 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55763 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55764 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55765 + } else if (current->signal->curr_ip) {
55766 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55767 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55768 + } else if (gr_acl_is_enabled()) {
55769 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55770 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55771 + } else {
55772 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55773 + strcpy(buf, fmt);
55774 + }
55775 +
55776 + return NO_FLOODING;
55777 +}
55778 +
55779 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55780 + __attribute__ ((format (printf, 2, 0)));
55781 +
55782 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55783 +{
55784 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55785 + unsigned int len = strlen(buf);
55786 +
55787 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55788 +
55789 + return;
55790 +}
55791 +
55792 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55793 + __attribute__ ((format (printf, 2, 3)));
55794 +
55795 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55796 +{
55797 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55798 + unsigned int len = strlen(buf);
55799 + va_list ap;
55800 +
55801 + va_start(ap, msg);
55802 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55803 + va_end(ap);
55804 +
55805 + return;
55806 +}
55807 +
55808 +static void gr_log_end(int audit, int append_default)
55809 +{
55810 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55811 +
55812 + if (append_default) {
55813 + unsigned int len = strlen(buf);
55814 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55815 + }
55816 +
55817 + printk("%s\n", buf);
55818 +
55819 + return;
55820 +}
55821 +
55822 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55823 +{
55824 + int logtype;
55825 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55826 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55827 + void *voidptr = NULL;
55828 + int num1 = 0, num2 = 0;
55829 + unsigned long ulong1 = 0, ulong2 = 0;
55830 + struct dentry *dentry = NULL;
55831 + struct vfsmount *mnt = NULL;
55832 + struct file *file = NULL;
55833 + struct task_struct *task = NULL;
55834 + const struct cred *cred, *pcred;
55835 + va_list ap;
55836 +
55837 + BEGIN_LOCKS(audit);
55838 + logtype = gr_log_start(audit);
55839 + if (logtype == FLOODING) {
55840 + END_LOCKS(audit);
55841 + return;
55842 + }
55843 + va_start(ap, argtypes);
55844 + switch (argtypes) {
55845 + case GR_TTYSNIFF:
55846 + task = va_arg(ap, struct task_struct *);
55847 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55848 + break;
55849 + case GR_SYSCTL_HIDDEN:
55850 + str1 = va_arg(ap, char *);
55851 + gr_log_middle_varargs(audit, msg, result, str1);
55852 + break;
55853 + case GR_RBAC:
55854 + dentry = va_arg(ap, struct dentry *);
55855 + mnt = va_arg(ap, struct vfsmount *);
55856 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55857 + break;
55858 + case GR_RBAC_STR:
55859 + dentry = va_arg(ap, struct dentry *);
55860 + mnt = va_arg(ap, struct vfsmount *);
55861 + str1 = va_arg(ap, char *);
55862 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55863 + break;
55864 + case GR_STR_RBAC:
55865 + str1 = va_arg(ap, char *);
55866 + dentry = va_arg(ap, struct dentry *);
55867 + mnt = va_arg(ap, struct vfsmount *);
55868 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55869 + break;
55870 + case GR_RBAC_MODE2:
55871 + dentry = va_arg(ap, struct dentry *);
55872 + mnt = va_arg(ap, struct vfsmount *);
55873 + str1 = va_arg(ap, char *);
55874 + str2 = va_arg(ap, char *);
55875 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55876 + break;
55877 + case GR_RBAC_MODE3:
55878 + dentry = va_arg(ap, struct dentry *);
55879 + mnt = va_arg(ap, struct vfsmount *);
55880 + str1 = va_arg(ap, char *);
55881 + str2 = va_arg(ap, char *);
55882 + str3 = va_arg(ap, char *);
55883 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55884 + break;
55885 + case GR_FILENAME:
55886 + dentry = va_arg(ap, struct dentry *);
55887 + mnt = va_arg(ap, struct vfsmount *);
55888 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55889 + break;
55890 + case GR_STR_FILENAME:
55891 + str1 = va_arg(ap, char *);
55892 + dentry = va_arg(ap, struct dentry *);
55893 + mnt = va_arg(ap, struct vfsmount *);
55894 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55895 + break;
55896 + case GR_FILENAME_STR:
55897 + dentry = va_arg(ap, struct dentry *);
55898 + mnt = va_arg(ap, struct vfsmount *);
55899 + str1 = va_arg(ap, char *);
55900 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55901 + break;
55902 + case GR_FILENAME_TWO_INT:
55903 + dentry = va_arg(ap, struct dentry *);
55904 + mnt = va_arg(ap, struct vfsmount *);
55905 + num1 = va_arg(ap, int);
55906 + num2 = va_arg(ap, int);
55907 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55908 + break;
55909 + case GR_FILENAME_TWO_INT_STR:
55910 + dentry = va_arg(ap, struct dentry *);
55911 + mnt = va_arg(ap, struct vfsmount *);
55912 + num1 = va_arg(ap, int);
55913 + num2 = va_arg(ap, int);
55914 + str1 = va_arg(ap, char *);
55915 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55916 + break;
55917 + case GR_TEXTREL:
55918 + file = va_arg(ap, struct file *);
55919 + ulong1 = va_arg(ap, unsigned long);
55920 + ulong2 = va_arg(ap, unsigned long);
55921 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55922 + break;
55923 + case GR_PTRACE:
55924 + task = va_arg(ap, struct task_struct *);
55925 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55926 + break;
55927 + case GR_RESOURCE:
55928 + task = va_arg(ap, struct task_struct *);
55929 + cred = __task_cred(task);
55930 + pcred = __task_cred(task->real_parent);
55931 + ulong1 = va_arg(ap, unsigned long);
55932 + str1 = va_arg(ap, char *);
55933 + ulong2 = va_arg(ap, unsigned long);
55934 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55935 + break;
55936 + case GR_CAP:
55937 + task = va_arg(ap, struct task_struct *);
55938 + cred = __task_cred(task);
55939 + pcred = __task_cred(task->real_parent);
55940 + str1 = va_arg(ap, char *);
55941 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55942 + break;
55943 + case GR_SIG:
55944 + str1 = va_arg(ap, char *);
55945 + voidptr = va_arg(ap, void *);
55946 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55947 + break;
55948 + case GR_SIG2:
55949 + task = va_arg(ap, struct task_struct *);
55950 + cred = __task_cred(task);
55951 + pcred = __task_cred(task->real_parent);
55952 + num1 = va_arg(ap, int);
55953 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55954 + break;
55955 + case GR_CRASH1:
55956 + task = va_arg(ap, struct task_struct *);
55957 + cred = __task_cred(task);
55958 + pcred = __task_cred(task->real_parent);
55959 + ulong1 = va_arg(ap, unsigned long);
55960 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55961 + break;
55962 + case GR_CRASH2:
55963 + task = va_arg(ap, struct task_struct *);
55964 + cred = __task_cred(task);
55965 + pcred = __task_cred(task->real_parent);
55966 + ulong1 = va_arg(ap, unsigned long);
55967 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55968 + break;
55969 + case GR_RWXMAP:
55970 + file = va_arg(ap, struct file *);
55971 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55972 + break;
55973 + case GR_PSACCT:
55974 + {
55975 + unsigned int wday, cday;
55976 + __u8 whr, chr;
55977 + __u8 wmin, cmin;
55978 + __u8 wsec, csec;
55979 + char cur_tty[64] = { 0 };
55980 + char parent_tty[64] = { 0 };
55981 +
55982 + task = va_arg(ap, struct task_struct *);
55983 + wday = va_arg(ap, unsigned int);
55984 + cday = va_arg(ap, unsigned int);
55985 + whr = va_arg(ap, int);
55986 + chr = va_arg(ap, int);
55987 + wmin = va_arg(ap, int);
55988 + cmin = va_arg(ap, int);
55989 + wsec = va_arg(ap, int);
55990 + csec = va_arg(ap, int);
55991 + ulong1 = va_arg(ap, unsigned long);
55992 + cred = __task_cred(task);
55993 + pcred = __task_cred(task->real_parent);
55994 +
55995 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55996 + }
55997 + break;
55998 + default:
55999 + gr_log_middle(audit, msg, ap);
56000 + }
56001 + va_end(ap);
56002 + // these don't need DEFAULTSECARGS printed on the end
56003 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
56004 + gr_log_end(audit, 0);
56005 + else
56006 + gr_log_end(audit, 1);
56007 + END_LOCKS(audit);
56008 +}
56009 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
56010 new file mode 100644
56011 index 0000000..f536303
56012 --- /dev/null
56013 +++ b/grsecurity/grsec_mem.c
56014 @@ -0,0 +1,40 @@
56015 +#include <linux/kernel.h>
56016 +#include <linux/sched.h>
56017 +#include <linux/mm.h>
56018 +#include <linux/mman.h>
56019 +#include <linux/grinternal.h>
56020 +
56021 +void
56022 +gr_handle_ioperm(void)
56023 +{
56024 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
56025 + return;
56026 +}
56027 +
56028 +void
56029 +gr_handle_iopl(void)
56030 +{
56031 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
56032 + return;
56033 +}
56034 +
56035 +void
56036 +gr_handle_mem_readwrite(u64 from, u64 to)
56037 +{
56038 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
56039 + return;
56040 +}
56041 +
56042 +void
56043 +gr_handle_vm86(void)
56044 +{
56045 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
56046 + return;
56047 +}
56048 +
56049 +void
56050 +gr_log_badprocpid(const char *entry)
56051 +{
56052 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
56053 + return;
56054 +}
56055 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
56056 new file mode 100644
56057 index 0000000..2131422
56058 --- /dev/null
56059 +++ b/grsecurity/grsec_mount.c
56060 @@ -0,0 +1,62 @@
56061 +#include <linux/kernel.h>
56062 +#include <linux/sched.h>
56063 +#include <linux/mount.h>
56064 +#include <linux/grsecurity.h>
56065 +#include <linux/grinternal.h>
56066 +
56067 +void
56068 +gr_log_remount(const char *devname, const int retval)
56069 +{
56070 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56071 + if (grsec_enable_mount && (retval >= 0))
56072 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
56073 +#endif
56074 + return;
56075 +}
56076 +
56077 +void
56078 +gr_log_unmount(const char *devname, const int retval)
56079 +{
56080 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56081 + if (grsec_enable_mount && (retval >= 0))
56082 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
56083 +#endif
56084 + return;
56085 +}
56086 +
56087 +void
56088 +gr_log_mount(const char *from, const char *to, const int retval)
56089 +{
56090 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56091 + if (grsec_enable_mount && (retval >= 0))
56092 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
56093 +#endif
56094 + return;
56095 +}
56096 +
56097 +int
56098 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
56099 +{
56100 +#ifdef CONFIG_GRKERNSEC_ROFS
56101 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
56102 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
56103 + return -EPERM;
56104 + } else
56105 + return 0;
56106 +#endif
56107 + return 0;
56108 +}
56109 +
56110 +int
56111 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
56112 +{
56113 +#ifdef CONFIG_GRKERNSEC_ROFS
56114 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
56115 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
56116 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
56117 + return -EPERM;
56118 + } else
56119 + return 0;
56120 +#endif
56121 + return 0;
56122 +}
56123 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
56124 new file mode 100644
56125 index 0000000..a3b12a0
56126 --- /dev/null
56127 +++ b/grsecurity/grsec_pax.c
56128 @@ -0,0 +1,36 @@
56129 +#include <linux/kernel.h>
56130 +#include <linux/sched.h>
56131 +#include <linux/mm.h>
56132 +#include <linux/file.h>
56133 +#include <linux/grinternal.h>
56134 +#include <linux/grsecurity.h>
56135 +
56136 +void
56137 +gr_log_textrel(struct vm_area_struct * vma)
56138 +{
56139 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56140 + if (grsec_enable_audit_textrel)
56141 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
56142 +#endif
56143 + return;
56144 +}
56145 +
56146 +void
56147 +gr_log_rwxmmap(struct file *file)
56148 +{
56149 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56150 + if (grsec_enable_log_rwxmaps)
56151 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
56152 +#endif
56153 + return;
56154 +}
56155 +
56156 +void
56157 +gr_log_rwxmprotect(struct file *file)
56158 +{
56159 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56160 + if (grsec_enable_log_rwxmaps)
56161 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
56162 +#endif
56163 + return;
56164 +}
56165 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
56166 new file mode 100644
56167 index 0000000..f7f29aa
56168 --- /dev/null
56169 +++ b/grsecurity/grsec_ptrace.c
56170 @@ -0,0 +1,30 @@
56171 +#include <linux/kernel.h>
56172 +#include <linux/sched.h>
56173 +#include <linux/grinternal.h>
56174 +#include <linux/security.h>
56175 +
56176 +void
56177 +gr_audit_ptrace(struct task_struct *task)
56178 +{
56179 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56180 + if (grsec_enable_audit_ptrace)
56181 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
56182 +#endif
56183 + return;
56184 +}
56185 +
56186 +int
56187 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
56188 +{
56189 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56190 + const struct dentry *dentry = file->f_path.dentry;
56191 + const struct vfsmount *mnt = file->f_path.mnt;
56192 +
56193 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56194 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56195 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56196 + return -EACCES;
56197 + }
56198 +#endif
56199 + return 0;
56200 +}
56201 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56202 new file mode 100644
56203 index 0000000..7a5b2de
56204 --- /dev/null
56205 +++ b/grsecurity/grsec_sig.c
56206 @@ -0,0 +1,207 @@
56207 +#include <linux/kernel.h>
56208 +#include <linux/sched.h>
56209 +#include <linux/delay.h>
56210 +#include <linux/grsecurity.h>
56211 +#include <linux/grinternal.h>
56212 +#include <linux/hardirq.h>
56213 +
56214 +char *signames[] = {
56215 + [SIGSEGV] = "Segmentation fault",
56216 + [SIGILL] = "Illegal instruction",
56217 + [SIGABRT] = "Abort",
56218 + [SIGBUS] = "Invalid alignment/Bus error"
56219 +};
56220 +
56221 +void
56222 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56223 +{
56224 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56225 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56226 + (sig == SIGABRT) || (sig == SIGBUS))) {
56227 + if (t->pid == current->pid) {
56228 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56229 + } else {
56230 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56231 + }
56232 + }
56233 +#endif
56234 + return;
56235 +}
56236 +
56237 +int
56238 +gr_handle_signal(const struct task_struct *p, const int sig)
56239 +{
56240 +#ifdef CONFIG_GRKERNSEC
56241 + /* ignore the 0 signal for protected task checks */
56242 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56243 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56244 + return -EPERM;
56245 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56246 + return -EPERM;
56247 + }
56248 +#endif
56249 + return 0;
56250 +}
56251 +
56252 +#ifdef CONFIG_GRKERNSEC
56253 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56254 +
56255 +int gr_fake_force_sig(int sig, struct task_struct *t)
56256 +{
56257 + unsigned long int flags;
56258 + int ret, blocked, ignored;
56259 + struct k_sigaction *action;
56260 +
56261 + spin_lock_irqsave(&t->sighand->siglock, flags);
56262 + action = &t->sighand->action[sig-1];
56263 + ignored = action->sa.sa_handler == SIG_IGN;
56264 + blocked = sigismember(&t->blocked, sig);
56265 + if (blocked || ignored) {
56266 + action->sa.sa_handler = SIG_DFL;
56267 + if (blocked) {
56268 + sigdelset(&t->blocked, sig);
56269 + recalc_sigpending_and_wake(t);
56270 + }
56271 + }
56272 + if (action->sa.sa_handler == SIG_DFL)
56273 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
56274 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56275 +
56276 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
56277 +
56278 + return ret;
56279 +}
56280 +#endif
56281 +
56282 +#ifdef CONFIG_GRKERNSEC_BRUTE
56283 +#define GR_USER_BAN_TIME (15 * 60)
56284 +
56285 +static int __get_dumpable(unsigned long mm_flags)
56286 +{
56287 + int ret;
56288 +
56289 + ret = mm_flags & MMF_DUMPABLE_MASK;
56290 + return (ret >= 2) ? 2 : ret;
56291 +}
56292 +#endif
56293 +
56294 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56295 +{
56296 +#ifdef CONFIG_GRKERNSEC_BRUTE
56297 + uid_t uid = 0;
56298 +
56299 + if (!grsec_enable_brute)
56300 + return;
56301 +
56302 + rcu_read_lock();
56303 + read_lock(&tasklist_lock);
56304 + read_lock(&grsec_exec_file_lock);
56305 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56306 + p->real_parent->brute = 1;
56307 + else {
56308 + const struct cred *cred = __task_cred(p), *cred2;
56309 + struct task_struct *tsk, *tsk2;
56310 +
56311 + if (!__get_dumpable(mm_flags) && cred->uid) {
56312 + struct user_struct *user;
56313 +
56314 + uid = cred->uid;
56315 +
56316 + /* this is put upon execution past expiration */
56317 + user = find_user(uid);
56318 + if (user == NULL)
56319 + goto unlock;
56320 + user->banned = 1;
56321 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56322 + if (user->ban_expires == ~0UL)
56323 + user->ban_expires--;
56324 +
56325 + do_each_thread(tsk2, tsk) {
56326 + cred2 = __task_cred(tsk);
56327 + if (tsk != p && cred2->uid == uid)
56328 + gr_fake_force_sig(SIGKILL, tsk);
56329 + } while_each_thread(tsk2, tsk);
56330 + }
56331 + }
56332 +unlock:
56333 + read_unlock(&grsec_exec_file_lock);
56334 + read_unlock(&tasklist_lock);
56335 + rcu_read_unlock();
56336 +
56337 + if (uid)
56338 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56339 +
56340 +#endif
56341 + return;
56342 +}
56343 +
56344 +void gr_handle_brute_check(void)
56345 +{
56346 +#ifdef CONFIG_GRKERNSEC_BRUTE
56347 + if (current->brute)
56348 + msleep(30 * 1000);
56349 +#endif
56350 + return;
56351 +}
56352 +
56353 +void gr_handle_kernel_exploit(void)
56354 +{
56355 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56356 + const struct cred *cred;
56357 + struct task_struct *tsk, *tsk2;
56358 + struct user_struct *user;
56359 + uid_t uid;
56360 +
56361 + if (in_irq() || in_serving_softirq() || in_nmi())
56362 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56363 +
56364 + uid = current_uid();
56365 +
56366 + if (uid == 0)
56367 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
56368 + else {
56369 + /* kill all the processes of this user, hold a reference
56370 + to their creds struct, and prevent them from creating
56371 + another process until system reset
56372 + */
56373 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56374 + /* we intentionally leak this ref */
56375 + user = get_uid(current->cred->user);
56376 + if (user) {
56377 + user->banned = 1;
56378 + user->ban_expires = ~0UL;
56379 + }
56380 +
56381 + read_lock(&tasklist_lock);
56382 + do_each_thread(tsk2, tsk) {
56383 + cred = __task_cred(tsk);
56384 + if (cred->uid == uid)
56385 + gr_fake_force_sig(SIGKILL, tsk);
56386 + } while_each_thread(tsk2, tsk);
56387 + read_unlock(&tasklist_lock);
56388 + }
56389 +#endif
56390 +}
56391 +
56392 +int __gr_process_user_ban(struct user_struct *user)
56393 +{
56394 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56395 + if (unlikely(user->banned)) {
56396 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56397 + user->banned = 0;
56398 + user->ban_expires = 0;
56399 + free_uid(user);
56400 + } else
56401 + return -EPERM;
56402 + }
56403 +#endif
56404 + return 0;
56405 +}
56406 +
56407 +int gr_process_user_ban(void)
56408 +{
56409 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56410 + return __gr_process_user_ban(current->cred->user);
56411 +#endif
56412 + return 0;
56413 +}
56414 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56415 new file mode 100644
56416 index 0000000..4030d57
56417 --- /dev/null
56418 +++ b/grsecurity/grsec_sock.c
56419 @@ -0,0 +1,244 @@
56420 +#include <linux/kernel.h>
56421 +#include <linux/module.h>
56422 +#include <linux/sched.h>
56423 +#include <linux/file.h>
56424 +#include <linux/net.h>
56425 +#include <linux/in.h>
56426 +#include <linux/ip.h>
56427 +#include <net/sock.h>
56428 +#include <net/inet_sock.h>
56429 +#include <linux/grsecurity.h>
56430 +#include <linux/grinternal.h>
56431 +#include <linux/gracl.h>
56432 +
56433 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56434 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56435 +
56436 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56437 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56438 +
56439 +#ifdef CONFIG_UNIX_MODULE
56440 +EXPORT_SYMBOL(gr_acl_handle_unix);
56441 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56442 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56443 +EXPORT_SYMBOL(gr_handle_create);
56444 +#endif
56445 +
56446 +#ifdef CONFIG_GRKERNSEC
56447 +#define gr_conn_table_size 32749
56448 +struct conn_table_entry {
56449 + struct conn_table_entry *next;
56450 + struct signal_struct *sig;
56451 +};
56452 +
56453 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56454 +DEFINE_SPINLOCK(gr_conn_table_lock);
56455 +
56456 +extern const char * gr_socktype_to_name(unsigned char type);
56457 +extern const char * gr_proto_to_name(unsigned char proto);
56458 +extern const char * gr_sockfamily_to_name(unsigned char family);
56459 +
56460 +static __inline__ int
56461 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56462 +{
56463 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56464 +}
56465 +
56466 +static __inline__ int
56467 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56468 + __u16 sport, __u16 dport)
56469 +{
56470 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56471 + sig->gr_sport == sport && sig->gr_dport == dport))
56472 + return 1;
56473 + else
56474 + return 0;
56475 +}
56476 +
56477 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56478 +{
56479 + struct conn_table_entry **match;
56480 + unsigned int index;
56481 +
56482 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56483 + sig->gr_sport, sig->gr_dport,
56484 + gr_conn_table_size);
56485 +
56486 + newent->sig = sig;
56487 +
56488 + match = &gr_conn_table[index];
56489 + newent->next = *match;
56490 + *match = newent;
56491 +
56492 + return;
56493 +}
56494 +
56495 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56496 +{
56497 + struct conn_table_entry *match, *last = NULL;
56498 + unsigned int index;
56499 +
56500 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56501 + sig->gr_sport, sig->gr_dport,
56502 + gr_conn_table_size);
56503 +
56504 + match = gr_conn_table[index];
56505 + while (match && !conn_match(match->sig,
56506 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56507 + sig->gr_dport)) {
56508 + last = match;
56509 + match = match->next;
56510 + }
56511 +
56512 + if (match) {
56513 + if (last)
56514 + last->next = match->next;
56515 + else
56516 + gr_conn_table[index] = NULL;
56517 + kfree(match);
56518 + }
56519 +
56520 + return;
56521 +}
56522 +
56523 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56524 + __u16 sport, __u16 dport)
56525 +{
56526 + struct conn_table_entry *match;
56527 + unsigned int index;
56528 +
56529 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56530 +
56531 + match = gr_conn_table[index];
56532 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56533 + match = match->next;
56534 +
56535 + if (match)
56536 + return match->sig;
56537 + else
56538 + return NULL;
56539 +}
56540 +
56541 +#endif
56542 +
56543 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56544 +{
56545 +#ifdef CONFIG_GRKERNSEC
56546 + struct signal_struct *sig = task->signal;
56547 + struct conn_table_entry *newent;
56548 +
56549 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56550 + if (newent == NULL)
56551 + return;
56552 + /* no bh lock needed since we are called with bh disabled */
56553 + spin_lock(&gr_conn_table_lock);
56554 + gr_del_task_from_ip_table_nolock(sig);
56555 + sig->gr_saddr = inet->inet_rcv_saddr;
56556 + sig->gr_daddr = inet->inet_daddr;
56557 + sig->gr_sport = inet->inet_sport;
56558 + sig->gr_dport = inet->inet_dport;
56559 + gr_add_to_task_ip_table_nolock(sig, newent);
56560 + spin_unlock(&gr_conn_table_lock);
56561 +#endif
56562 + return;
56563 +}
56564 +
56565 +void gr_del_task_from_ip_table(struct task_struct *task)
56566 +{
56567 +#ifdef CONFIG_GRKERNSEC
56568 + spin_lock_bh(&gr_conn_table_lock);
56569 + gr_del_task_from_ip_table_nolock(task->signal);
56570 + spin_unlock_bh(&gr_conn_table_lock);
56571 +#endif
56572 + return;
56573 +}
56574 +
56575 +void
56576 +gr_attach_curr_ip(const struct sock *sk)
56577 +{
56578 +#ifdef CONFIG_GRKERNSEC
56579 + struct signal_struct *p, *set;
56580 + const struct inet_sock *inet = inet_sk(sk);
56581 +
56582 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56583 + return;
56584 +
56585 + set = current->signal;
56586 +
56587 + spin_lock_bh(&gr_conn_table_lock);
56588 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56589 + inet->inet_dport, inet->inet_sport);
56590 + if (unlikely(p != NULL)) {
56591 + set->curr_ip = p->curr_ip;
56592 + set->used_accept = 1;
56593 + gr_del_task_from_ip_table_nolock(p);
56594 + spin_unlock_bh(&gr_conn_table_lock);
56595 + return;
56596 + }
56597 + spin_unlock_bh(&gr_conn_table_lock);
56598 +
56599 + set->curr_ip = inet->inet_daddr;
56600 + set->used_accept = 1;
56601 +#endif
56602 + return;
56603 +}
56604 +
56605 +int
56606 +gr_handle_sock_all(const int family, const int type, const int protocol)
56607 +{
56608 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56609 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56610 + (family != AF_UNIX)) {
56611 + if (family == AF_INET)
56612 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56613 + else
56614 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56615 + return -EACCES;
56616 + }
56617 +#endif
56618 + return 0;
56619 +}
56620 +
56621 +int
56622 +gr_handle_sock_server(const struct sockaddr *sck)
56623 +{
56624 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56625 + if (grsec_enable_socket_server &&
56626 + in_group_p(grsec_socket_server_gid) &&
56627 + sck && (sck->sa_family != AF_UNIX) &&
56628 + (sck->sa_family != AF_LOCAL)) {
56629 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56630 + return -EACCES;
56631 + }
56632 +#endif
56633 + return 0;
56634 +}
56635 +
56636 +int
56637 +gr_handle_sock_server_other(const struct sock *sck)
56638 +{
56639 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56640 + if (grsec_enable_socket_server &&
56641 + in_group_p(grsec_socket_server_gid) &&
56642 + sck && (sck->sk_family != AF_UNIX) &&
56643 + (sck->sk_family != AF_LOCAL)) {
56644 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56645 + return -EACCES;
56646 + }
56647 +#endif
56648 + return 0;
56649 +}
56650 +
56651 +int
56652 +gr_handle_sock_client(const struct sockaddr *sck)
56653 +{
56654 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56655 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56656 + sck && (sck->sa_family != AF_UNIX) &&
56657 + (sck->sa_family != AF_LOCAL)) {
56658 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56659 + return -EACCES;
56660 + }
56661 +#endif
56662 + return 0;
56663 +}
56664 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56665 new file mode 100644
56666 index 0000000..a1aedd7
56667 --- /dev/null
56668 +++ b/grsecurity/grsec_sysctl.c
56669 @@ -0,0 +1,451 @@
56670 +#include <linux/kernel.h>
56671 +#include <linux/sched.h>
56672 +#include <linux/sysctl.h>
56673 +#include <linux/grsecurity.h>
56674 +#include <linux/grinternal.h>
56675 +
56676 +int
56677 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56678 +{
56679 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56680 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56681 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56682 + return -EACCES;
56683 + }
56684 +#endif
56685 + return 0;
56686 +}
56687 +
56688 +#ifdef CONFIG_GRKERNSEC_ROFS
56689 +static int __maybe_unused one = 1;
56690 +#endif
56691 +
56692 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56693 +struct ctl_table grsecurity_table[] = {
56694 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56695 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56696 +#ifdef CONFIG_GRKERNSEC_IO
56697 + {
56698 + .procname = "disable_priv_io",
56699 + .data = &grsec_disable_privio,
56700 + .maxlen = sizeof(int),
56701 + .mode = 0600,
56702 + .proc_handler = &proc_dointvec,
56703 + },
56704 +#endif
56705 +#endif
56706 +#ifdef CONFIG_GRKERNSEC_LINK
56707 + {
56708 + .procname = "linking_restrictions",
56709 + .data = &grsec_enable_link,
56710 + .maxlen = sizeof(int),
56711 + .mode = 0600,
56712 + .proc_handler = &proc_dointvec,
56713 + },
56714 +#endif
56715 +#ifdef CONFIG_GRKERNSEC_BRUTE
56716 + {
56717 + .procname = "deter_bruteforce",
56718 + .data = &grsec_enable_brute,
56719 + .maxlen = sizeof(int),
56720 + .mode = 0600,
56721 + .proc_handler = &proc_dointvec,
56722 + },
56723 +#endif
56724 +#ifdef CONFIG_GRKERNSEC_FIFO
56725 + {
56726 + .procname = "fifo_restrictions",
56727 + .data = &grsec_enable_fifo,
56728 + .maxlen = sizeof(int),
56729 + .mode = 0600,
56730 + .proc_handler = &proc_dointvec,
56731 + },
56732 +#endif
56733 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56734 + {
56735 + .procname = "ptrace_readexec",
56736 + .data = &grsec_enable_ptrace_readexec,
56737 + .maxlen = sizeof(int),
56738 + .mode = 0600,
56739 + .proc_handler = &proc_dointvec,
56740 + },
56741 +#endif
56742 +#ifdef CONFIG_GRKERNSEC_SETXID
56743 + {
56744 + .procname = "consistent_setxid",
56745 + .data = &grsec_enable_setxid,
56746 + .maxlen = sizeof(int),
56747 + .mode = 0600,
56748 + .proc_handler = &proc_dointvec,
56749 + },
56750 +#endif
56751 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56752 + {
56753 + .procname = "ip_blackhole",
56754 + .data = &grsec_enable_blackhole,
56755 + .maxlen = sizeof(int),
56756 + .mode = 0600,
56757 + .proc_handler = &proc_dointvec,
56758 + },
56759 + {
56760 + .procname = "lastack_retries",
56761 + .data = &grsec_lastack_retries,
56762 + .maxlen = sizeof(int),
56763 + .mode = 0600,
56764 + .proc_handler = &proc_dointvec,
56765 + },
56766 +#endif
56767 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56768 + {
56769 + .procname = "exec_logging",
56770 + .data = &grsec_enable_execlog,
56771 + .maxlen = sizeof(int),
56772 + .mode = 0600,
56773 + .proc_handler = &proc_dointvec,
56774 + },
56775 +#endif
56776 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56777 + {
56778 + .procname = "rwxmap_logging",
56779 + .data = &grsec_enable_log_rwxmaps,
56780 + .maxlen = sizeof(int),
56781 + .mode = 0600,
56782 + .proc_handler = &proc_dointvec,
56783 + },
56784 +#endif
56785 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56786 + {
56787 + .procname = "signal_logging",
56788 + .data = &grsec_enable_signal,
56789 + .maxlen = sizeof(int),
56790 + .mode = 0600,
56791 + .proc_handler = &proc_dointvec,
56792 + },
56793 +#endif
56794 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56795 + {
56796 + .procname = "forkfail_logging",
56797 + .data = &grsec_enable_forkfail,
56798 + .maxlen = sizeof(int),
56799 + .mode = 0600,
56800 + .proc_handler = &proc_dointvec,
56801 + },
56802 +#endif
56803 +#ifdef CONFIG_GRKERNSEC_TIME
56804 + {
56805 + .procname = "timechange_logging",
56806 + .data = &grsec_enable_time,
56807 + .maxlen = sizeof(int),
56808 + .mode = 0600,
56809 + .proc_handler = &proc_dointvec,
56810 + },
56811 +#endif
56812 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56813 + {
56814 + .procname = "chroot_deny_shmat",
56815 + .data = &grsec_enable_chroot_shmat,
56816 + .maxlen = sizeof(int),
56817 + .mode = 0600,
56818 + .proc_handler = &proc_dointvec,
56819 + },
56820 +#endif
56821 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56822 + {
56823 + .procname = "chroot_deny_unix",
56824 + .data = &grsec_enable_chroot_unix,
56825 + .maxlen = sizeof(int),
56826 + .mode = 0600,
56827 + .proc_handler = &proc_dointvec,
56828 + },
56829 +#endif
56830 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56831 + {
56832 + .procname = "chroot_deny_mount",
56833 + .data = &grsec_enable_chroot_mount,
56834 + .maxlen = sizeof(int),
56835 + .mode = 0600,
56836 + .proc_handler = &proc_dointvec,
56837 + },
56838 +#endif
56839 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56840 + {
56841 + .procname = "chroot_deny_fchdir",
56842 + .data = &grsec_enable_chroot_fchdir,
56843 + .maxlen = sizeof(int),
56844 + .mode = 0600,
56845 + .proc_handler = &proc_dointvec,
56846 + },
56847 +#endif
56848 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56849 + {
56850 + .procname = "chroot_deny_chroot",
56851 + .data = &grsec_enable_chroot_double,
56852 + .maxlen = sizeof(int),
56853 + .mode = 0600,
56854 + .proc_handler = &proc_dointvec,
56855 + },
56856 +#endif
56857 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56858 + {
56859 + .procname = "chroot_deny_pivot",
56860 + .data = &grsec_enable_chroot_pivot,
56861 + .maxlen = sizeof(int),
56862 + .mode = 0600,
56863 + .proc_handler = &proc_dointvec,
56864 + },
56865 +#endif
56866 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56867 + {
56868 + .procname = "chroot_enforce_chdir",
56869 + .data = &grsec_enable_chroot_chdir,
56870 + .maxlen = sizeof(int),
56871 + .mode = 0600,
56872 + .proc_handler = &proc_dointvec,
56873 + },
56874 +#endif
56875 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56876 + {
56877 + .procname = "chroot_deny_chmod",
56878 + .data = &grsec_enable_chroot_chmod,
56879 + .maxlen = sizeof(int),
56880 + .mode = 0600,
56881 + .proc_handler = &proc_dointvec,
56882 + },
56883 +#endif
56884 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56885 + {
56886 + .procname = "chroot_deny_mknod",
56887 + .data = &grsec_enable_chroot_mknod,
56888 + .maxlen = sizeof(int),
56889 + .mode = 0600,
56890 + .proc_handler = &proc_dointvec,
56891 + },
56892 +#endif
56893 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56894 + {
56895 + .procname = "chroot_restrict_nice",
56896 + .data = &grsec_enable_chroot_nice,
56897 + .maxlen = sizeof(int),
56898 + .mode = 0600,
56899 + .proc_handler = &proc_dointvec,
56900 + },
56901 +#endif
56902 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56903 + {
56904 + .procname = "chroot_execlog",
56905 + .data = &grsec_enable_chroot_execlog,
56906 + .maxlen = sizeof(int),
56907 + .mode = 0600,
56908 + .proc_handler = &proc_dointvec,
56909 + },
56910 +#endif
56911 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56912 + {
56913 + .procname = "chroot_caps",
56914 + .data = &grsec_enable_chroot_caps,
56915 + .maxlen = sizeof(int),
56916 + .mode = 0600,
56917 + .proc_handler = &proc_dointvec,
56918 + },
56919 +#endif
56920 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56921 + {
56922 + .procname = "chroot_deny_sysctl",
56923 + .data = &grsec_enable_chroot_sysctl,
56924 + .maxlen = sizeof(int),
56925 + .mode = 0600,
56926 + .proc_handler = &proc_dointvec,
56927 + },
56928 +#endif
56929 +#ifdef CONFIG_GRKERNSEC_TPE
56930 + {
56931 + .procname = "tpe",
56932 + .data = &grsec_enable_tpe,
56933 + .maxlen = sizeof(int),
56934 + .mode = 0600,
56935 + .proc_handler = &proc_dointvec,
56936 + },
56937 + {
56938 + .procname = "tpe_gid",
56939 + .data = &grsec_tpe_gid,
56940 + .maxlen = sizeof(int),
56941 + .mode = 0600,
56942 + .proc_handler = &proc_dointvec,
56943 + },
56944 +#endif
56945 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56946 + {
56947 + .procname = "tpe_invert",
56948 + .data = &grsec_enable_tpe_invert,
56949 + .maxlen = sizeof(int),
56950 + .mode = 0600,
56951 + .proc_handler = &proc_dointvec,
56952 + },
56953 +#endif
56954 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56955 + {
56956 + .procname = "tpe_restrict_all",
56957 + .data = &grsec_enable_tpe_all,
56958 + .maxlen = sizeof(int),
56959 + .mode = 0600,
56960 + .proc_handler = &proc_dointvec,
56961 + },
56962 +#endif
56963 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56964 + {
56965 + .procname = "socket_all",
56966 + .data = &grsec_enable_socket_all,
56967 + .maxlen = sizeof(int),
56968 + .mode = 0600,
56969 + .proc_handler = &proc_dointvec,
56970 + },
56971 + {
56972 + .procname = "socket_all_gid",
56973 + .data = &grsec_socket_all_gid,
56974 + .maxlen = sizeof(int),
56975 + .mode = 0600,
56976 + .proc_handler = &proc_dointvec,
56977 + },
56978 +#endif
56979 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56980 + {
56981 + .procname = "socket_client",
56982 + .data = &grsec_enable_socket_client,
56983 + .maxlen = sizeof(int),
56984 + .mode = 0600,
56985 + .proc_handler = &proc_dointvec,
56986 + },
56987 + {
56988 + .procname = "socket_client_gid",
56989 + .data = &grsec_socket_client_gid,
56990 + .maxlen = sizeof(int),
56991 + .mode = 0600,
56992 + .proc_handler = &proc_dointvec,
56993 + },
56994 +#endif
56995 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56996 + {
56997 + .procname = "socket_server",
56998 + .data = &grsec_enable_socket_server,
56999 + .maxlen = sizeof(int),
57000 + .mode = 0600,
57001 + .proc_handler = &proc_dointvec,
57002 + },
57003 + {
57004 + .procname = "socket_server_gid",
57005 + .data = &grsec_socket_server_gid,
57006 + .maxlen = sizeof(int),
57007 + .mode = 0600,
57008 + .proc_handler = &proc_dointvec,
57009 + },
57010 +#endif
57011 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57012 + {
57013 + .procname = "audit_group",
57014 + .data = &grsec_enable_group,
57015 + .maxlen = sizeof(int),
57016 + .mode = 0600,
57017 + .proc_handler = &proc_dointvec,
57018 + },
57019 + {
57020 + .procname = "audit_gid",
57021 + .data = &grsec_audit_gid,
57022 + .maxlen = sizeof(int),
57023 + .mode = 0600,
57024 + .proc_handler = &proc_dointvec,
57025 + },
57026 +#endif
57027 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57028 + {
57029 + .procname = "audit_chdir",
57030 + .data = &grsec_enable_chdir,
57031 + .maxlen = sizeof(int),
57032 + .mode = 0600,
57033 + .proc_handler = &proc_dointvec,
57034 + },
57035 +#endif
57036 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57037 + {
57038 + .procname = "audit_mount",
57039 + .data = &grsec_enable_mount,
57040 + .maxlen = sizeof(int),
57041 + .mode = 0600,
57042 + .proc_handler = &proc_dointvec,
57043 + },
57044 +#endif
57045 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57046 + {
57047 + .procname = "audit_textrel",
57048 + .data = &grsec_enable_audit_textrel,
57049 + .maxlen = sizeof(int),
57050 + .mode = 0600,
57051 + .proc_handler = &proc_dointvec,
57052 + },
57053 +#endif
57054 +#ifdef CONFIG_GRKERNSEC_DMESG
57055 + {
57056 + .procname = "dmesg",
57057 + .data = &grsec_enable_dmesg,
57058 + .maxlen = sizeof(int),
57059 + .mode = 0600,
57060 + .proc_handler = &proc_dointvec,
57061 + },
57062 +#endif
57063 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57064 + {
57065 + .procname = "chroot_findtask",
57066 + .data = &grsec_enable_chroot_findtask,
57067 + .maxlen = sizeof(int),
57068 + .mode = 0600,
57069 + .proc_handler = &proc_dointvec,
57070 + },
57071 +#endif
57072 +#ifdef CONFIG_GRKERNSEC_RESLOG
57073 + {
57074 + .procname = "resource_logging",
57075 + .data = &grsec_resource_logging,
57076 + .maxlen = sizeof(int),
57077 + .mode = 0600,
57078 + .proc_handler = &proc_dointvec,
57079 + },
57080 +#endif
57081 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57082 + {
57083 + .procname = "audit_ptrace",
57084 + .data = &grsec_enable_audit_ptrace,
57085 + .maxlen = sizeof(int),
57086 + .mode = 0600,
57087 + .proc_handler = &proc_dointvec,
57088 + },
57089 +#endif
57090 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57091 + {
57092 + .procname = "harden_ptrace",
57093 + .data = &grsec_enable_harden_ptrace,
57094 + .maxlen = sizeof(int),
57095 + .mode = 0600,
57096 + .proc_handler = &proc_dointvec,
57097 + },
57098 +#endif
57099 + {
57100 + .procname = "grsec_lock",
57101 + .data = &grsec_lock,
57102 + .maxlen = sizeof(int),
57103 + .mode = 0600,
57104 + .proc_handler = &proc_dointvec,
57105 + },
57106 +#endif
57107 +#ifdef CONFIG_GRKERNSEC_ROFS
57108 + {
57109 + .procname = "romount_protect",
57110 + .data = &grsec_enable_rofs,
57111 + .maxlen = sizeof(int),
57112 + .mode = 0600,
57113 + .proc_handler = &proc_dointvec_minmax,
57114 + .extra1 = &one,
57115 + .extra2 = &one,
57116 + },
57117 +#endif
57118 + { }
57119 +};
57120 +#endif
57121 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
57122 new file mode 100644
57123 index 0000000..0dc13c3
57124 --- /dev/null
57125 +++ b/grsecurity/grsec_time.c
57126 @@ -0,0 +1,16 @@
57127 +#include <linux/kernel.h>
57128 +#include <linux/sched.h>
57129 +#include <linux/grinternal.h>
57130 +#include <linux/module.h>
57131 +
57132 +void
57133 +gr_log_timechange(void)
57134 +{
57135 +#ifdef CONFIG_GRKERNSEC_TIME
57136 + if (grsec_enable_time)
57137 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
57138 +#endif
57139 + return;
57140 +}
57141 +
57142 +EXPORT_SYMBOL(gr_log_timechange);
57143 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
57144 new file mode 100644
57145 index 0000000..07e0dc0
57146 --- /dev/null
57147 +++ b/grsecurity/grsec_tpe.c
57148 @@ -0,0 +1,73 @@
57149 +#include <linux/kernel.h>
57150 +#include <linux/sched.h>
57151 +#include <linux/file.h>
57152 +#include <linux/fs.h>
57153 +#include <linux/grinternal.h>
57154 +
57155 +extern int gr_acl_tpe_check(void);
57156 +
57157 +int
57158 +gr_tpe_allow(const struct file *file)
57159 +{
57160 +#ifdef CONFIG_GRKERNSEC
57161 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
57162 + const struct cred *cred = current_cred();
57163 + char *msg = NULL;
57164 + char *msg2 = NULL;
57165 +
57166 + // never restrict root
57167 + if (!cred->uid)
57168 + return 1;
57169 +
57170 + if (grsec_enable_tpe) {
57171 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57172 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
57173 + msg = "not being in trusted group";
57174 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
57175 + msg = "being in untrusted group";
57176 +#else
57177 + if (in_group_p(grsec_tpe_gid))
57178 + msg = "being in untrusted group";
57179 +#endif
57180 + }
57181 + if (!msg && gr_acl_tpe_check())
57182 + msg = "being in untrusted role";
57183 +
57184 + // not in any affected group/role
57185 + if (!msg)
57186 + goto next_check;
57187 +
57188 + if (inode->i_uid)
57189 + msg2 = "file in non-root-owned directory";
57190 + else if (inode->i_mode & S_IWOTH)
57191 + msg2 = "file in world-writable directory";
57192 + else if (inode->i_mode & S_IWGRP)
57193 + msg2 = "file in group-writable directory";
57194 +
57195 + if (msg && msg2) {
57196 + char fullmsg[70] = {0};
57197 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57198 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57199 + return 0;
57200 + }
57201 + msg = NULL;
57202 +next_check:
57203 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57204 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57205 + return 1;
57206 +
57207 + if (inode->i_uid && (inode->i_uid != cred->uid))
57208 + msg = "directory not owned by user";
57209 + else if (inode->i_mode & S_IWOTH)
57210 + msg = "file in world-writable directory";
57211 + else if (inode->i_mode & S_IWGRP)
57212 + msg = "file in group-writable directory";
57213 +
57214 + if (msg) {
57215 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57216 + return 0;
57217 + }
57218 +#endif
57219 +#endif
57220 + return 1;
57221 +}
57222 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57223 new file mode 100644
57224 index 0000000..9f7b1ac
57225 --- /dev/null
57226 +++ b/grsecurity/grsum.c
57227 @@ -0,0 +1,61 @@
57228 +#include <linux/err.h>
57229 +#include <linux/kernel.h>
57230 +#include <linux/sched.h>
57231 +#include <linux/mm.h>
57232 +#include <linux/scatterlist.h>
57233 +#include <linux/crypto.h>
57234 +#include <linux/gracl.h>
57235 +
57236 +
57237 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57238 +#error "crypto and sha256 must be built into the kernel"
57239 +#endif
57240 +
57241 +int
57242 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57243 +{
57244 + char *p;
57245 + struct crypto_hash *tfm;
57246 + struct hash_desc desc;
57247 + struct scatterlist sg;
57248 + unsigned char temp_sum[GR_SHA_LEN];
57249 + volatile int retval = 0;
57250 + volatile int dummy = 0;
57251 + unsigned int i;
57252 +
57253 + sg_init_table(&sg, 1);
57254 +
57255 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57256 + if (IS_ERR(tfm)) {
57257 + /* should never happen, since sha256 should be built in */
57258 + return 1;
57259 + }
57260 +
57261 + desc.tfm = tfm;
57262 + desc.flags = 0;
57263 +
57264 + crypto_hash_init(&desc);
57265 +
57266 + p = salt;
57267 + sg_set_buf(&sg, p, GR_SALT_LEN);
57268 + crypto_hash_update(&desc, &sg, sg.length);
57269 +
57270 + p = entry->pw;
57271 + sg_set_buf(&sg, p, strlen(p));
57272 +
57273 + crypto_hash_update(&desc, &sg, sg.length);
57274 +
57275 + crypto_hash_final(&desc, temp_sum);
57276 +
57277 + memset(entry->pw, 0, GR_PW_LEN);
57278 +
57279 + for (i = 0; i < GR_SHA_LEN; i++)
57280 + if (sum[i] != temp_sum[i])
57281 + retval = 1;
57282 + else
57283 + dummy = 1; // waste a cycle
57284 +
57285 + crypto_free_hash(tfm);
57286 +
57287 + return retval;
57288 +}
57289 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57290 index 6cd5b64..f620d2d 100644
57291 --- a/include/acpi/acpi_bus.h
57292 +++ b/include/acpi/acpi_bus.h
57293 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57294 acpi_op_bind bind;
57295 acpi_op_unbind unbind;
57296 acpi_op_notify notify;
57297 -};
57298 +} __no_const;
57299
57300 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57301
57302 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57303 index b7babf0..71e4e74 100644
57304 --- a/include/asm-generic/atomic-long.h
57305 +++ b/include/asm-generic/atomic-long.h
57306 @@ -22,6 +22,12 @@
57307
57308 typedef atomic64_t atomic_long_t;
57309
57310 +#ifdef CONFIG_PAX_REFCOUNT
57311 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57312 +#else
57313 +typedef atomic64_t atomic_long_unchecked_t;
57314 +#endif
57315 +
57316 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57317
57318 static inline long atomic_long_read(atomic_long_t *l)
57319 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57320 return (long)atomic64_read(v);
57321 }
57322
57323 +#ifdef CONFIG_PAX_REFCOUNT
57324 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57325 +{
57326 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57327 +
57328 + return (long)atomic64_read_unchecked(v);
57329 +}
57330 +#endif
57331 +
57332 static inline void atomic_long_set(atomic_long_t *l, long i)
57333 {
57334 atomic64_t *v = (atomic64_t *)l;
57335 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57336 atomic64_set(v, i);
57337 }
57338
57339 +#ifdef CONFIG_PAX_REFCOUNT
57340 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57341 +{
57342 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57343 +
57344 + atomic64_set_unchecked(v, i);
57345 +}
57346 +#endif
57347 +
57348 static inline void atomic_long_inc(atomic_long_t *l)
57349 {
57350 atomic64_t *v = (atomic64_t *)l;
57351 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57352 atomic64_inc(v);
57353 }
57354
57355 +#ifdef CONFIG_PAX_REFCOUNT
57356 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57357 +{
57358 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57359 +
57360 + atomic64_inc_unchecked(v);
57361 +}
57362 +#endif
57363 +
57364 static inline void atomic_long_dec(atomic_long_t *l)
57365 {
57366 atomic64_t *v = (atomic64_t *)l;
57367 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57368 atomic64_dec(v);
57369 }
57370
57371 +#ifdef CONFIG_PAX_REFCOUNT
57372 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57373 +{
57374 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57375 +
57376 + atomic64_dec_unchecked(v);
57377 +}
57378 +#endif
57379 +
57380 static inline void atomic_long_add(long i, atomic_long_t *l)
57381 {
57382 atomic64_t *v = (atomic64_t *)l;
57383 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57384 atomic64_add(i, v);
57385 }
57386
57387 +#ifdef CONFIG_PAX_REFCOUNT
57388 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57389 +{
57390 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57391 +
57392 + atomic64_add_unchecked(i, v);
57393 +}
57394 +#endif
57395 +
57396 static inline void atomic_long_sub(long i, atomic_long_t *l)
57397 {
57398 atomic64_t *v = (atomic64_t *)l;
57399 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57400 atomic64_sub(i, v);
57401 }
57402
57403 +#ifdef CONFIG_PAX_REFCOUNT
57404 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57405 +{
57406 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57407 +
57408 + atomic64_sub_unchecked(i, v);
57409 +}
57410 +#endif
57411 +
57412 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57413 {
57414 atomic64_t *v = (atomic64_t *)l;
57415 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57416 return (long)atomic64_inc_return(v);
57417 }
57418
57419 +#ifdef CONFIG_PAX_REFCOUNT
57420 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57421 +{
57422 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57423 +
57424 + return (long)atomic64_inc_return_unchecked(v);
57425 +}
57426 +#endif
57427 +
57428 static inline long atomic_long_dec_return(atomic_long_t *l)
57429 {
57430 atomic64_t *v = (atomic64_t *)l;
57431 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57432
57433 typedef atomic_t atomic_long_t;
57434
57435 +#ifdef CONFIG_PAX_REFCOUNT
57436 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57437 +#else
57438 +typedef atomic_t atomic_long_unchecked_t;
57439 +#endif
57440 +
57441 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57442 static inline long atomic_long_read(atomic_long_t *l)
57443 {
57444 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57445 return (long)atomic_read(v);
57446 }
57447
57448 +#ifdef CONFIG_PAX_REFCOUNT
57449 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57450 +{
57451 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57452 +
57453 + return (long)atomic_read_unchecked(v);
57454 +}
57455 +#endif
57456 +
57457 static inline void atomic_long_set(atomic_long_t *l, long i)
57458 {
57459 atomic_t *v = (atomic_t *)l;
57460 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57461 atomic_set(v, i);
57462 }
57463
57464 +#ifdef CONFIG_PAX_REFCOUNT
57465 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57466 +{
57467 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57468 +
57469 + atomic_set_unchecked(v, i);
57470 +}
57471 +#endif
57472 +
57473 static inline void atomic_long_inc(atomic_long_t *l)
57474 {
57475 atomic_t *v = (atomic_t *)l;
57476 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57477 atomic_inc(v);
57478 }
57479
57480 +#ifdef CONFIG_PAX_REFCOUNT
57481 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57482 +{
57483 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57484 +
57485 + atomic_inc_unchecked(v);
57486 +}
57487 +#endif
57488 +
57489 static inline void atomic_long_dec(atomic_long_t *l)
57490 {
57491 atomic_t *v = (atomic_t *)l;
57492 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57493 atomic_dec(v);
57494 }
57495
57496 +#ifdef CONFIG_PAX_REFCOUNT
57497 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57498 +{
57499 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57500 +
57501 + atomic_dec_unchecked(v);
57502 +}
57503 +#endif
57504 +
57505 static inline void atomic_long_add(long i, atomic_long_t *l)
57506 {
57507 atomic_t *v = (atomic_t *)l;
57508 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57509 atomic_add(i, v);
57510 }
57511
57512 +#ifdef CONFIG_PAX_REFCOUNT
57513 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57514 +{
57515 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57516 +
57517 + atomic_add_unchecked(i, v);
57518 +}
57519 +#endif
57520 +
57521 static inline void atomic_long_sub(long i, atomic_long_t *l)
57522 {
57523 atomic_t *v = (atomic_t *)l;
57524 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57525 atomic_sub(i, v);
57526 }
57527
57528 +#ifdef CONFIG_PAX_REFCOUNT
57529 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57530 +{
57531 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57532 +
57533 + atomic_sub_unchecked(i, v);
57534 +}
57535 +#endif
57536 +
57537 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57538 {
57539 atomic_t *v = (atomic_t *)l;
57540 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57541 return (long)atomic_inc_return(v);
57542 }
57543
57544 +#ifdef CONFIG_PAX_REFCOUNT
57545 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57546 +{
57547 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57548 +
57549 + return (long)atomic_inc_return_unchecked(v);
57550 +}
57551 +#endif
57552 +
57553 static inline long atomic_long_dec_return(atomic_long_t *l)
57554 {
57555 atomic_t *v = (atomic_t *)l;
57556 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57557
57558 #endif /* BITS_PER_LONG == 64 */
57559
57560 +#ifdef CONFIG_PAX_REFCOUNT
57561 +static inline void pax_refcount_needs_these_functions(void)
57562 +{
57563 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57564 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57565 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57566 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57567 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57568 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57569 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57570 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57571 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57572 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57573 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57574 +
57575 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57576 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57577 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57578 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57579 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57580 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57581 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57582 +}
57583 +#else
57584 +#define atomic_read_unchecked(v) atomic_read(v)
57585 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57586 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57587 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57588 +#define atomic_inc_unchecked(v) atomic_inc(v)
57589 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57590 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57591 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57592 +#define atomic_dec_unchecked(v) atomic_dec(v)
57593 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57594 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57595 +
57596 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57597 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57598 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57599 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57600 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57601 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57602 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57603 +#endif
57604 +
57605 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57606 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57607 index b18ce4f..2ee2843 100644
57608 --- a/include/asm-generic/atomic64.h
57609 +++ b/include/asm-generic/atomic64.h
57610 @@ -16,6 +16,8 @@ typedef struct {
57611 long long counter;
57612 } atomic64_t;
57613
57614 +typedef atomic64_t atomic64_unchecked_t;
57615 +
57616 #define ATOMIC64_INIT(i) { (i) }
57617
57618 extern long long atomic64_read(const atomic64_t *v);
57619 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57620 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57621 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57622
57623 +#define atomic64_read_unchecked(v) atomic64_read(v)
57624 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57625 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57626 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57627 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57628 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57629 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57630 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57631 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57632 +
57633 #endif /* _ASM_GENERIC_ATOMIC64_H */
57634 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57635 index 1bfcfe5..e04c5c9 100644
57636 --- a/include/asm-generic/cache.h
57637 +++ b/include/asm-generic/cache.h
57638 @@ -6,7 +6,7 @@
57639 * cache lines need to provide their own cache.h.
57640 */
57641
57642 -#define L1_CACHE_SHIFT 5
57643 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57644 +#define L1_CACHE_SHIFT 5UL
57645 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57646
57647 #endif /* __ASM_GENERIC_CACHE_H */
57648 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57649 index 1ca3efc..e3dc852 100644
57650 --- a/include/asm-generic/int-l64.h
57651 +++ b/include/asm-generic/int-l64.h
57652 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57653 typedef signed long s64;
57654 typedef unsigned long u64;
57655
57656 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57657 +
57658 #define S8_C(x) x
57659 #define U8_C(x) x ## U
57660 #define S16_C(x) x
57661 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57662 index f394147..b6152b9 100644
57663 --- a/include/asm-generic/int-ll64.h
57664 +++ b/include/asm-generic/int-ll64.h
57665 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57666 typedef signed long long s64;
57667 typedef unsigned long long u64;
57668
57669 +typedef unsigned long long intoverflow_t;
57670 +
57671 #define S8_C(x) x
57672 #define U8_C(x) x ## U
57673 #define S16_C(x) x
57674 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57675 index 0232ccb..13d9165 100644
57676 --- a/include/asm-generic/kmap_types.h
57677 +++ b/include/asm-generic/kmap_types.h
57678 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57679 KMAP_D(17) KM_NMI,
57680 KMAP_D(18) KM_NMI_PTE,
57681 KMAP_D(19) KM_KDB,
57682 +KMAP_D(20) KM_CLEARPAGE,
57683 /*
57684 * Remember to update debug_kmap_atomic() when adding new kmap types!
57685 */
57686 -KMAP_D(20) KM_TYPE_NR
57687 +KMAP_D(21) KM_TYPE_NR
57688 };
57689
57690 #undef KMAP_D
57691 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57692 index 725612b..9cc513a 100644
57693 --- a/include/asm-generic/pgtable-nopmd.h
57694 +++ b/include/asm-generic/pgtable-nopmd.h
57695 @@ -1,14 +1,19 @@
57696 #ifndef _PGTABLE_NOPMD_H
57697 #define _PGTABLE_NOPMD_H
57698
57699 -#ifndef __ASSEMBLY__
57700 -
57701 #include <asm-generic/pgtable-nopud.h>
57702
57703 -struct mm_struct;
57704 -
57705 #define __PAGETABLE_PMD_FOLDED
57706
57707 +#define PMD_SHIFT PUD_SHIFT
57708 +#define PTRS_PER_PMD 1
57709 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57710 +#define PMD_MASK (~(PMD_SIZE-1))
57711 +
57712 +#ifndef __ASSEMBLY__
57713 +
57714 +struct mm_struct;
57715 +
57716 /*
57717 * Having the pmd type consist of a pud gets the size right, and allows
57718 * us to conceptually access the pud entry that this pmd is folded into
57719 @@ -16,11 +21,6 @@ struct mm_struct;
57720 */
57721 typedef struct { pud_t pud; } pmd_t;
57722
57723 -#define PMD_SHIFT PUD_SHIFT
57724 -#define PTRS_PER_PMD 1
57725 -#define PMD_SIZE (1UL << PMD_SHIFT)
57726 -#define PMD_MASK (~(PMD_SIZE-1))
57727 -
57728 /*
57729 * The "pud_xxx()" functions here are trivial for a folded two-level
57730 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57731 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57732 index 810431d..ccc3638 100644
57733 --- a/include/asm-generic/pgtable-nopud.h
57734 +++ b/include/asm-generic/pgtable-nopud.h
57735 @@ -1,10 +1,15 @@
57736 #ifndef _PGTABLE_NOPUD_H
57737 #define _PGTABLE_NOPUD_H
57738
57739 -#ifndef __ASSEMBLY__
57740 -
57741 #define __PAGETABLE_PUD_FOLDED
57742
57743 +#define PUD_SHIFT PGDIR_SHIFT
57744 +#define PTRS_PER_PUD 1
57745 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57746 +#define PUD_MASK (~(PUD_SIZE-1))
57747 +
57748 +#ifndef __ASSEMBLY__
57749 +
57750 /*
57751 * Having the pud type consist of a pgd gets the size right, and allows
57752 * us to conceptually access the pgd entry that this pud is folded into
57753 @@ -12,11 +17,6 @@
57754 */
57755 typedef struct { pgd_t pgd; } pud_t;
57756
57757 -#define PUD_SHIFT PGDIR_SHIFT
57758 -#define PTRS_PER_PUD 1
57759 -#define PUD_SIZE (1UL << PUD_SHIFT)
57760 -#define PUD_MASK (~(PUD_SIZE-1))
57761 -
57762 /*
57763 * The "pgd_xxx()" functions here are trivial for a folded two-level
57764 * setup: the pud is never bad, and a pud always exists (as it's folded
57765 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57766 index 76bff2b..c7a14e2 100644
57767 --- a/include/asm-generic/pgtable.h
57768 +++ b/include/asm-generic/pgtable.h
57769 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57770 #endif /* __HAVE_ARCH_PMD_WRITE */
57771 #endif
57772
57773 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57774 +static inline unsigned long pax_open_kernel(void) { return 0; }
57775 +#endif
57776 +
57777 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57778 +static inline unsigned long pax_close_kernel(void) { return 0; }
57779 +#endif
57780 +
57781 #endif /* !__ASSEMBLY__ */
57782
57783 #endif /* _ASM_GENERIC_PGTABLE_H */
57784 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57785 index b5e2e4c..6a5373e 100644
57786 --- a/include/asm-generic/vmlinux.lds.h
57787 +++ b/include/asm-generic/vmlinux.lds.h
57788 @@ -217,6 +217,7 @@
57789 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57790 VMLINUX_SYMBOL(__start_rodata) = .; \
57791 *(.rodata) *(.rodata.*) \
57792 + *(.data..read_only) \
57793 *(__vermagic) /* Kernel version magic */ \
57794 . = ALIGN(8); \
57795 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57796 @@ -722,17 +723,18 @@
57797 * section in the linker script will go there too. @phdr should have
57798 * a leading colon.
57799 *
57800 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57801 + * Note that this macros defines per_cpu_load as an absolute symbol.
57802 * If there is no need to put the percpu section at a predetermined
57803 * address, use PERCPU_SECTION.
57804 */
57805 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57806 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57807 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57808 + per_cpu_load = .; \
57809 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57810 - LOAD_OFFSET) { \
57811 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57812 PERCPU_INPUT(cacheline) \
57813 } phdr \
57814 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57815 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57816
57817 /**
57818 * PERCPU_SECTION - define output section for percpu area, simple version
57819 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57820 index bf4b2dc..2d0762f 100644
57821 --- a/include/drm/drmP.h
57822 +++ b/include/drm/drmP.h
57823 @@ -72,6 +72,7 @@
57824 #include <linux/workqueue.h>
57825 #include <linux/poll.h>
57826 #include <asm/pgalloc.h>
57827 +#include <asm/local.h>
57828 #include "drm.h"
57829
57830 #include <linux/idr.h>
57831 @@ -1038,7 +1039,7 @@ struct drm_device {
57832
57833 /** \name Usage Counters */
57834 /*@{ */
57835 - int open_count; /**< Outstanding files open */
57836 + local_t open_count; /**< Outstanding files open */
57837 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57838 atomic_t vma_count; /**< Outstanding vma areas open */
57839 int buf_use; /**< Buffers in use -- cannot alloc */
57840 @@ -1049,7 +1050,7 @@ struct drm_device {
57841 /*@{ */
57842 unsigned long counters;
57843 enum drm_stat_type types[15];
57844 - atomic_t counts[15];
57845 + atomic_unchecked_t counts[15];
57846 /*@} */
57847
57848 struct list_head filelist;
57849 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57850 index 73b0712..0b7ef2f 100644
57851 --- a/include/drm/drm_crtc_helper.h
57852 +++ b/include/drm/drm_crtc_helper.h
57853 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57854
57855 /* disable crtc when not in use - more explicit than dpms off */
57856 void (*disable)(struct drm_crtc *crtc);
57857 -};
57858 +} __no_const;
57859
57860 struct drm_encoder_helper_funcs {
57861 void (*dpms)(struct drm_encoder *encoder, int mode);
57862 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57863 struct drm_connector *connector);
57864 /* disable encoder when not in use - more explicit than dpms off */
57865 void (*disable)(struct drm_encoder *encoder);
57866 -};
57867 +} __no_const;
57868
57869 struct drm_connector_helper_funcs {
57870 int (*get_modes)(struct drm_connector *connector);
57871 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57872 index 26c1f78..6722682 100644
57873 --- a/include/drm/ttm/ttm_memory.h
57874 +++ b/include/drm/ttm/ttm_memory.h
57875 @@ -47,7 +47,7 @@
57876
57877 struct ttm_mem_shrink {
57878 int (*do_shrink) (struct ttm_mem_shrink *);
57879 -};
57880 +} __no_const;
57881
57882 /**
57883 * struct ttm_mem_global - Global memory accounting structure.
57884 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57885 index e86dfca..40cc55f 100644
57886 --- a/include/linux/a.out.h
57887 +++ b/include/linux/a.out.h
57888 @@ -39,6 +39,14 @@ enum machine_type {
57889 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57890 };
57891
57892 +/* Constants for the N_FLAGS field */
57893 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57894 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57895 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57896 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57897 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57898 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57899 +
57900 #if !defined (N_MAGIC)
57901 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57902 #endif
57903 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57904 index 49a83ca..df96b54 100644
57905 --- a/include/linux/atmdev.h
57906 +++ b/include/linux/atmdev.h
57907 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57908 #endif
57909
57910 struct k_atm_aal_stats {
57911 -#define __HANDLE_ITEM(i) atomic_t i
57912 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57913 __AAL_STAT_ITEMS
57914 #undef __HANDLE_ITEM
57915 };
57916 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57917 index fd88a39..8a801b4 100644
57918 --- a/include/linux/binfmts.h
57919 +++ b/include/linux/binfmts.h
57920 @@ -18,7 +18,7 @@ struct pt_regs;
57921 #define BINPRM_BUF_SIZE 128
57922
57923 #ifdef __KERNEL__
57924 -#include <linux/list.h>
57925 +#include <linux/sched.h>
57926
57927 #define CORENAME_MAX_SIZE 128
57928
57929 @@ -58,6 +58,7 @@ struct linux_binprm {
57930 unsigned interp_flags;
57931 unsigned interp_data;
57932 unsigned long loader, exec;
57933 + char tcomm[TASK_COMM_LEN];
57934 };
57935
57936 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57937 @@ -88,6 +89,7 @@ struct linux_binfmt {
57938 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57939 int (*load_shlib)(struct file *);
57940 int (*core_dump)(struct coredump_params *cprm);
57941 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57942 unsigned long min_coredump; /* minimal dump size */
57943 };
57944
57945 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57946 index 0ed1eb0..3ab569b 100644
57947 --- a/include/linux/blkdev.h
57948 +++ b/include/linux/blkdev.h
57949 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57950 /* this callback is with swap_lock and sometimes page table lock held */
57951 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57952 struct module *owner;
57953 -};
57954 +} __do_const;
57955
57956 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57957 unsigned long);
57958 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57959 index 4d1a074..88f929a 100644
57960 --- a/include/linux/blktrace_api.h
57961 +++ b/include/linux/blktrace_api.h
57962 @@ -162,7 +162,7 @@ struct blk_trace {
57963 struct dentry *dir;
57964 struct dentry *dropped_file;
57965 struct dentry *msg_file;
57966 - atomic_t dropped;
57967 + atomic_unchecked_t dropped;
57968 };
57969
57970 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57971 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57972 index 83195fb..0b0f77d 100644
57973 --- a/include/linux/byteorder/little_endian.h
57974 +++ b/include/linux/byteorder/little_endian.h
57975 @@ -42,51 +42,51 @@
57976
57977 static inline __le64 __cpu_to_le64p(const __u64 *p)
57978 {
57979 - return (__force __le64)*p;
57980 + return (__force const __le64)*p;
57981 }
57982 static inline __u64 __le64_to_cpup(const __le64 *p)
57983 {
57984 - return (__force __u64)*p;
57985 + return (__force const __u64)*p;
57986 }
57987 static inline __le32 __cpu_to_le32p(const __u32 *p)
57988 {
57989 - return (__force __le32)*p;
57990 + return (__force const __le32)*p;
57991 }
57992 static inline __u32 __le32_to_cpup(const __le32 *p)
57993 {
57994 - return (__force __u32)*p;
57995 + return (__force const __u32)*p;
57996 }
57997 static inline __le16 __cpu_to_le16p(const __u16 *p)
57998 {
57999 - return (__force __le16)*p;
58000 + return (__force const __le16)*p;
58001 }
58002 static inline __u16 __le16_to_cpup(const __le16 *p)
58003 {
58004 - return (__force __u16)*p;
58005 + return (__force const __u16)*p;
58006 }
58007 static inline __be64 __cpu_to_be64p(const __u64 *p)
58008 {
58009 - return (__force __be64)__swab64p(p);
58010 + return (__force const __be64)__swab64p(p);
58011 }
58012 static inline __u64 __be64_to_cpup(const __be64 *p)
58013 {
58014 - return __swab64p((__u64 *)p);
58015 + return __swab64p((const __u64 *)p);
58016 }
58017 static inline __be32 __cpu_to_be32p(const __u32 *p)
58018 {
58019 - return (__force __be32)__swab32p(p);
58020 + return (__force const __be32)__swab32p(p);
58021 }
58022 static inline __u32 __be32_to_cpup(const __be32 *p)
58023 {
58024 - return __swab32p((__u32 *)p);
58025 + return __swab32p((const __u32 *)p);
58026 }
58027 static inline __be16 __cpu_to_be16p(const __u16 *p)
58028 {
58029 - return (__force __be16)__swab16p(p);
58030 + return (__force const __be16)__swab16p(p);
58031 }
58032 static inline __u16 __be16_to_cpup(const __be16 *p)
58033 {
58034 - return __swab16p((__u16 *)p);
58035 + return __swab16p((const __u16 *)p);
58036 }
58037 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58038 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58039 diff --git a/include/linux/cache.h b/include/linux/cache.h
58040 index 4c57065..4307975 100644
58041 --- a/include/linux/cache.h
58042 +++ b/include/linux/cache.h
58043 @@ -16,6 +16,10 @@
58044 #define __read_mostly
58045 #endif
58046
58047 +#ifndef __read_only
58048 +#define __read_only __read_mostly
58049 +#endif
58050 +
58051 #ifndef ____cacheline_aligned
58052 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58053 #endif
58054 diff --git a/include/linux/capability.h b/include/linux/capability.h
58055 index a63d13d..069bfd5 100644
58056 --- a/include/linux/capability.h
58057 +++ b/include/linux/capability.h
58058 @@ -548,6 +548,9 @@ extern bool capable(int cap);
58059 extern bool ns_capable(struct user_namespace *ns, int cap);
58060 extern bool task_ns_capable(struct task_struct *t, int cap);
58061 extern bool nsown_capable(int cap);
58062 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
58063 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58064 +extern bool capable_nolog(int cap);
58065
58066 /* audit system wants to get cap info from files as well */
58067 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
58068 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
58069 index 04ffb2e..6799180 100644
58070 --- a/include/linux/cleancache.h
58071 +++ b/include/linux/cleancache.h
58072 @@ -31,7 +31,7 @@ struct cleancache_ops {
58073 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
58074 void (*flush_inode)(int, struct cleancache_filekey);
58075 void (*flush_fs)(int);
58076 -};
58077 +} __no_const;
58078
58079 extern struct cleancache_ops
58080 cleancache_register_ops(struct cleancache_ops *ops);
58081 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
58082 index dfadc96..c0e70c1 100644
58083 --- a/include/linux/compiler-gcc4.h
58084 +++ b/include/linux/compiler-gcc4.h
58085 @@ -31,6 +31,12 @@
58086
58087
58088 #if __GNUC_MINOR__ >= 5
58089 +
58090 +#ifdef CONSTIFY_PLUGIN
58091 +#define __no_const __attribute__((no_const))
58092 +#define __do_const __attribute__((do_const))
58093 +#endif
58094 +
58095 /*
58096 * Mark a position in code as unreachable. This can be used to
58097 * suppress control flow warnings after asm blocks that transfer
58098 @@ -46,6 +52,11 @@
58099 #define __noclone __attribute__((__noclone__))
58100
58101 #endif
58102 +
58103 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58104 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58105 +#define __bos0(ptr) __bos((ptr), 0)
58106 +#define __bos1(ptr) __bos((ptr), 1)
58107 #endif
58108
58109 #if __GNUC_MINOR__ > 0
58110 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
58111 index 320d6c9..8573a1c 100644
58112 --- a/include/linux/compiler.h
58113 +++ b/include/linux/compiler.h
58114 @@ -5,31 +5,62 @@
58115
58116 #ifdef __CHECKER__
58117 # define __user __attribute__((noderef, address_space(1)))
58118 +# define __force_user __force __user
58119 # define __kernel __attribute__((address_space(0)))
58120 +# define __force_kernel __force __kernel
58121 # define __safe __attribute__((safe))
58122 # define __force __attribute__((force))
58123 # define __nocast __attribute__((nocast))
58124 # define __iomem __attribute__((noderef, address_space(2)))
58125 +# define __force_iomem __force __iomem
58126 # define __acquires(x) __attribute__((context(x,0,1)))
58127 # define __releases(x) __attribute__((context(x,1,0)))
58128 # define __acquire(x) __context__(x,1)
58129 # define __release(x) __context__(x,-1)
58130 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
58131 # define __percpu __attribute__((noderef, address_space(3)))
58132 +# define __force_percpu __force __percpu
58133 #ifdef CONFIG_SPARSE_RCU_POINTER
58134 # define __rcu __attribute__((noderef, address_space(4)))
58135 +# define __force_rcu __force __rcu
58136 #else
58137 # define __rcu
58138 +# define __force_rcu
58139 #endif
58140 extern void __chk_user_ptr(const volatile void __user *);
58141 extern void __chk_io_ptr(const volatile void __iomem *);
58142 +#elif defined(CHECKER_PLUGIN)
58143 +//# define __user
58144 +//# define __force_user
58145 +//# define __kernel
58146 +//# define __force_kernel
58147 +# define __safe
58148 +# define __force
58149 +# define __nocast
58150 +# define __iomem
58151 +# define __force_iomem
58152 +# define __chk_user_ptr(x) (void)0
58153 +# define __chk_io_ptr(x) (void)0
58154 +# define __builtin_warning(x, y...) (1)
58155 +# define __acquires(x)
58156 +# define __releases(x)
58157 +# define __acquire(x) (void)0
58158 +# define __release(x) (void)0
58159 +# define __cond_lock(x,c) (c)
58160 +# define __percpu
58161 +# define __force_percpu
58162 +# define __rcu
58163 +# define __force_rcu
58164 #else
58165 # define __user
58166 +# define __force_user
58167 # define __kernel
58168 +# define __force_kernel
58169 # define __safe
58170 # define __force
58171 # define __nocast
58172 # define __iomem
58173 +# define __force_iomem
58174 # define __chk_user_ptr(x) (void)0
58175 # define __chk_io_ptr(x) (void)0
58176 # define __builtin_warning(x, y...) (1)
58177 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
58178 # define __release(x) (void)0
58179 # define __cond_lock(x,c) (c)
58180 # define __percpu
58181 +# define __force_percpu
58182 # define __rcu
58183 +# define __force_rcu
58184 #endif
58185
58186 #ifdef __KERNEL__
58187 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58188 # define __attribute_const__ /* unimplemented */
58189 #endif
58190
58191 +#ifndef __no_const
58192 +# define __no_const
58193 +#endif
58194 +
58195 +#ifndef __do_const
58196 +# define __do_const
58197 +#endif
58198 +
58199 /*
58200 * Tell gcc if a function is cold. The compiler will assume any path
58201 * directly leading to the call is unlikely.
58202 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58203 #define __cold
58204 #endif
58205
58206 +#ifndef __alloc_size
58207 +#define __alloc_size(...)
58208 +#endif
58209 +
58210 +#ifndef __bos
58211 +#define __bos(ptr, arg)
58212 +#endif
58213 +
58214 +#ifndef __bos0
58215 +#define __bos0(ptr)
58216 +#endif
58217 +
58218 +#ifndef __bos1
58219 +#define __bos1(ptr)
58220 +#endif
58221 +
58222 /* Simple shorthand for a section definition */
58223 #ifndef __section
58224 # define __section(S) __attribute__ ((__section__(#S)))
58225 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58226 * use is to mediate communication between process-level code and irq/NMI
58227 * handlers, all running on the same CPU.
58228 */
58229 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58230 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58231 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58232
58233 #endif /* __LINUX_COMPILER_H */
58234 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58235 index e9eaec5..bfeb9bb 100644
58236 --- a/include/linux/cpuset.h
58237 +++ b/include/linux/cpuset.h
58238 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58239 * nodemask.
58240 */
58241 smp_mb();
58242 - --ACCESS_ONCE(current->mems_allowed_change_disable);
58243 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58244 }
58245
58246 static inline void set_mems_allowed(nodemask_t nodemask)
58247 diff --git a/include/linux/cred.h b/include/linux/cred.h
58248 index 4030896..8d6f342 100644
58249 --- a/include/linux/cred.h
58250 +++ b/include/linux/cred.h
58251 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58252 static inline void validate_process_creds(void)
58253 {
58254 }
58255 +static inline void validate_task_creds(struct task_struct *task)
58256 +{
58257 +}
58258 #endif
58259
58260 /**
58261 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58262 index 8a94217..15d49e3 100644
58263 --- a/include/linux/crypto.h
58264 +++ b/include/linux/crypto.h
58265 @@ -365,7 +365,7 @@ struct cipher_tfm {
58266 const u8 *key, unsigned int keylen);
58267 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58268 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58269 -};
58270 +} __no_const;
58271
58272 struct hash_tfm {
58273 int (*init)(struct hash_desc *desc);
58274 @@ -386,13 +386,13 @@ struct compress_tfm {
58275 int (*cot_decompress)(struct crypto_tfm *tfm,
58276 const u8 *src, unsigned int slen,
58277 u8 *dst, unsigned int *dlen);
58278 -};
58279 +} __no_const;
58280
58281 struct rng_tfm {
58282 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58283 unsigned int dlen);
58284 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58285 -};
58286 +} __no_const;
58287
58288 #define crt_ablkcipher crt_u.ablkcipher
58289 #define crt_aead crt_u.aead
58290 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58291 index 7925bf0..d5143d2 100644
58292 --- a/include/linux/decompress/mm.h
58293 +++ b/include/linux/decompress/mm.h
58294 @@ -77,7 +77,7 @@ static void free(void *where)
58295 * warnings when not needed (indeed large_malloc / large_free are not
58296 * needed by inflate */
58297
58298 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58299 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58300 #define free(a) kfree(a)
58301
58302 #define large_malloc(a) vmalloc(a)
58303 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58304 index e13117c..e9fc938 100644
58305 --- a/include/linux/dma-mapping.h
58306 +++ b/include/linux/dma-mapping.h
58307 @@ -46,7 +46,7 @@ struct dma_map_ops {
58308 u64 (*get_required_mask)(struct device *dev);
58309 #endif
58310 int is_phys;
58311 -};
58312 +} __do_const;
58313
58314 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58315
58316 diff --git a/include/linux/efi.h b/include/linux/efi.h
58317 index 2362a0b..cfaf8fcc 100644
58318 --- a/include/linux/efi.h
58319 +++ b/include/linux/efi.h
58320 @@ -446,7 +446,7 @@ struct efivar_operations {
58321 efi_get_variable_t *get_variable;
58322 efi_get_next_variable_t *get_next_variable;
58323 efi_set_variable_t *set_variable;
58324 -};
58325 +} __no_const;
58326
58327 struct efivars {
58328 /*
58329 diff --git a/include/linux/elf.h b/include/linux/elf.h
58330 index 31f0508..5421c01 100644
58331 --- a/include/linux/elf.h
58332 +++ b/include/linux/elf.h
58333 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58334 #define PT_GNU_EH_FRAME 0x6474e550
58335
58336 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58337 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58338 +
58339 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58340 +
58341 +/* Constants for the e_flags field */
58342 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58343 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58344 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58345 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58346 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58347 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58348
58349 /*
58350 * Extended Numbering
58351 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58352 #define DT_DEBUG 21
58353 #define DT_TEXTREL 22
58354 #define DT_JMPREL 23
58355 +#define DT_FLAGS 30
58356 + #define DF_TEXTREL 0x00000004
58357 #define DT_ENCODING 32
58358 #define OLD_DT_LOOS 0x60000000
58359 #define DT_LOOS 0x6000000d
58360 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58361 #define PF_W 0x2
58362 #define PF_X 0x1
58363
58364 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58365 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58366 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58367 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58368 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58369 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58370 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58371 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58372 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58373 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58374 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58375 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58376 +
58377 typedef struct elf32_phdr{
58378 Elf32_Word p_type;
58379 Elf32_Off p_offset;
58380 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58381 #define EI_OSABI 7
58382 #define EI_PAD 8
58383
58384 +#define EI_PAX 14
58385 +
58386 #define ELFMAG0 0x7f /* EI_MAG */
58387 #define ELFMAG1 'E'
58388 #define ELFMAG2 'L'
58389 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58390 #define elf_note elf32_note
58391 #define elf_addr_t Elf32_Off
58392 #define Elf_Half Elf32_Half
58393 +#define elf_dyn Elf32_Dyn
58394
58395 #else
58396
58397 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58398 #define elf_note elf64_note
58399 #define elf_addr_t Elf64_Off
58400 #define Elf_Half Elf64_Half
58401 +#define elf_dyn Elf64_Dyn
58402
58403 #endif
58404
58405 diff --git a/include/linux/filter.h b/include/linux/filter.h
58406 index 8eeb205..d59bfa2 100644
58407 --- a/include/linux/filter.h
58408 +++ b/include/linux/filter.h
58409 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58410
58411 struct sk_buff;
58412 struct sock;
58413 +struct bpf_jit_work;
58414
58415 struct sk_filter
58416 {
58417 @@ -141,6 +142,9 @@ struct sk_filter
58418 unsigned int len; /* Number of filter blocks */
58419 unsigned int (*bpf_func)(const struct sk_buff *skb,
58420 const struct sock_filter *filter);
58421 +#ifdef CONFIG_BPF_JIT
58422 + struct bpf_jit_work *work;
58423 +#endif
58424 struct rcu_head rcu;
58425 struct sock_filter insns[0];
58426 };
58427 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58428 index 84ccf8e..2e9b14c 100644
58429 --- a/include/linux/firewire.h
58430 +++ b/include/linux/firewire.h
58431 @@ -428,7 +428,7 @@ struct fw_iso_context {
58432 union {
58433 fw_iso_callback_t sc;
58434 fw_iso_mc_callback_t mc;
58435 - } callback;
58436 + } __no_const callback;
58437 void *callback_data;
58438 };
58439
58440 diff --git a/include/linux/fs.h b/include/linux/fs.h
58441 index e0bc4ff..d79c2fa 100644
58442 --- a/include/linux/fs.h
58443 +++ b/include/linux/fs.h
58444 @@ -1608,7 +1608,8 @@ struct file_operations {
58445 int (*setlease)(struct file *, long, struct file_lock **);
58446 long (*fallocate)(struct file *file, int mode, loff_t offset,
58447 loff_t len);
58448 -};
58449 +} __do_const;
58450 +typedef struct file_operations __no_const file_operations_no_const;
58451
58452 struct inode_operations {
58453 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58454 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58455 index 003dc0f..3c4ea97 100644
58456 --- a/include/linux/fs_struct.h
58457 +++ b/include/linux/fs_struct.h
58458 @@ -6,7 +6,7 @@
58459 #include <linux/seqlock.h>
58460
58461 struct fs_struct {
58462 - int users;
58463 + atomic_t users;
58464 spinlock_t lock;
58465 seqcount_t seq;
58466 int umask;
58467 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58468 index ce31408..b1ad003 100644
58469 --- a/include/linux/fscache-cache.h
58470 +++ b/include/linux/fscache-cache.h
58471 @@ -102,7 +102,7 @@ struct fscache_operation {
58472 fscache_operation_release_t release;
58473 };
58474
58475 -extern atomic_t fscache_op_debug_id;
58476 +extern atomic_unchecked_t fscache_op_debug_id;
58477 extern void fscache_op_work_func(struct work_struct *work);
58478
58479 extern void fscache_enqueue_operation(struct fscache_operation *);
58480 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58481 {
58482 INIT_WORK(&op->work, fscache_op_work_func);
58483 atomic_set(&op->usage, 1);
58484 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58485 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58486 op->processor = processor;
58487 op->release = release;
58488 INIT_LIST_HEAD(&op->pend_link);
58489 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58490 index 2a53f10..0187fdf 100644
58491 --- a/include/linux/fsnotify.h
58492 +++ b/include/linux/fsnotify.h
58493 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58494 */
58495 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58496 {
58497 - return kstrdup(name, GFP_KERNEL);
58498 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58499 }
58500
58501 /*
58502 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58503 index 91d0e0a3..035666b 100644
58504 --- a/include/linux/fsnotify_backend.h
58505 +++ b/include/linux/fsnotify_backend.h
58506 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58507 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58508 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58509 };
58510 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58511
58512 /*
58513 * A group is a "thing" that wants to receive notification about filesystem
58514 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58515 index c3da42d..c70e0df 100644
58516 --- a/include/linux/ftrace_event.h
58517 +++ b/include/linux/ftrace_event.h
58518 @@ -97,7 +97,7 @@ struct trace_event_functions {
58519 trace_print_func raw;
58520 trace_print_func hex;
58521 trace_print_func binary;
58522 -};
58523 +} __no_const;
58524
58525 struct trace_event {
58526 struct hlist_node node;
58527 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58528 extern int trace_add_event_call(struct ftrace_event_call *call);
58529 extern void trace_remove_event_call(struct ftrace_event_call *call);
58530
58531 -#define is_signed_type(type) (((type)(-1)) < 0)
58532 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58533
58534 int trace_set_clr_event(const char *system, const char *event, int set);
58535
58536 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58537 index 6d18f35..ab71e2c 100644
58538 --- a/include/linux/genhd.h
58539 +++ b/include/linux/genhd.h
58540 @@ -185,7 +185,7 @@ struct gendisk {
58541 struct kobject *slave_dir;
58542
58543 struct timer_rand_state *random;
58544 - atomic_t sync_io; /* RAID */
58545 + atomic_unchecked_t sync_io; /* RAID */
58546 struct disk_events *ev;
58547 #ifdef CONFIG_BLK_DEV_INTEGRITY
58548 struct blk_integrity *integrity;
58549 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58550 new file mode 100644
58551 index 0000000..8a130b6
58552 --- /dev/null
58553 +++ b/include/linux/gracl.h
58554 @@ -0,0 +1,319 @@
58555 +#ifndef GR_ACL_H
58556 +#define GR_ACL_H
58557 +
58558 +#include <linux/grdefs.h>
58559 +#include <linux/resource.h>
58560 +#include <linux/capability.h>
58561 +#include <linux/dcache.h>
58562 +#include <asm/resource.h>
58563 +
58564 +/* Major status information */
58565 +
58566 +#define GR_VERSION "grsecurity 2.9"
58567 +#define GRSECURITY_VERSION 0x2900
58568 +
58569 +enum {
58570 + GR_SHUTDOWN = 0,
58571 + GR_ENABLE = 1,
58572 + GR_SPROLE = 2,
58573 + GR_RELOAD = 3,
58574 + GR_SEGVMOD = 4,
58575 + GR_STATUS = 5,
58576 + GR_UNSPROLE = 6,
58577 + GR_PASSSET = 7,
58578 + GR_SPROLEPAM = 8,
58579 +};
58580 +
58581 +/* Password setup definitions
58582 + * kernel/grhash.c */
58583 +enum {
58584 + GR_PW_LEN = 128,
58585 + GR_SALT_LEN = 16,
58586 + GR_SHA_LEN = 32,
58587 +};
58588 +
58589 +enum {
58590 + GR_SPROLE_LEN = 64,
58591 +};
58592 +
58593 +enum {
58594 + GR_NO_GLOB = 0,
58595 + GR_REG_GLOB,
58596 + GR_CREATE_GLOB
58597 +};
58598 +
58599 +#define GR_NLIMITS 32
58600 +
58601 +/* Begin Data Structures */
58602 +
58603 +struct sprole_pw {
58604 + unsigned char *rolename;
58605 + unsigned char salt[GR_SALT_LEN];
58606 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58607 +};
58608 +
58609 +struct name_entry {
58610 + __u32 key;
58611 + ino_t inode;
58612 + dev_t device;
58613 + char *name;
58614 + __u16 len;
58615 + __u8 deleted;
58616 + struct name_entry *prev;
58617 + struct name_entry *next;
58618 +};
58619 +
58620 +struct inodev_entry {
58621 + struct name_entry *nentry;
58622 + struct inodev_entry *prev;
58623 + struct inodev_entry *next;
58624 +};
58625 +
58626 +struct acl_role_db {
58627 + struct acl_role_label **r_hash;
58628 + __u32 r_size;
58629 +};
58630 +
58631 +struct inodev_db {
58632 + struct inodev_entry **i_hash;
58633 + __u32 i_size;
58634 +};
58635 +
58636 +struct name_db {
58637 + struct name_entry **n_hash;
58638 + __u32 n_size;
58639 +};
58640 +
58641 +struct crash_uid {
58642 + uid_t uid;
58643 + unsigned long expires;
58644 +};
58645 +
58646 +struct gr_hash_struct {
58647 + void **table;
58648 + void **nametable;
58649 + void *first;
58650 + __u32 table_size;
58651 + __u32 used_size;
58652 + int type;
58653 +};
58654 +
58655 +/* Userspace Grsecurity ACL data structures */
58656 +
58657 +struct acl_subject_label {
58658 + char *filename;
58659 + ino_t inode;
58660 + dev_t device;
58661 + __u32 mode;
58662 + kernel_cap_t cap_mask;
58663 + kernel_cap_t cap_lower;
58664 + kernel_cap_t cap_invert_audit;
58665 +
58666 + struct rlimit res[GR_NLIMITS];
58667 + __u32 resmask;
58668 +
58669 + __u8 user_trans_type;
58670 + __u8 group_trans_type;
58671 + uid_t *user_transitions;
58672 + gid_t *group_transitions;
58673 + __u16 user_trans_num;
58674 + __u16 group_trans_num;
58675 +
58676 + __u32 sock_families[2];
58677 + __u32 ip_proto[8];
58678 + __u32 ip_type;
58679 + struct acl_ip_label **ips;
58680 + __u32 ip_num;
58681 + __u32 inaddr_any_override;
58682 +
58683 + __u32 crashes;
58684 + unsigned long expires;
58685 +
58686 + struct acl_subject_label *parent_subject;
58687 + struct gr_hash_struct *hash;
58688 + struct acl_subject_label *prev;
58689 + struct acl_subject_label *next;
58690 +
58691 + struct acl_object_label **obj_hash;
58692 + __u32 obj_hash_size;
58693 + __u16 pax_flags;
58694 +};
58695 +
58696 +struct role_allowed_ip {
58697 + __u32 addr;
58698 + __u32 netmask;
58699 +
58700 + struct role_allowed_ip *prev;
58701 + struct role_allowed_ip *next;
58702 +};
58703 +
58704 +struct role_transition {
58705 + char *rolename;
58706 +
58707 + struct role_transition *prev;
58708 + struct role_transition *next;
58709 +};
58710 +
58711 +struct acl_role_label {
58712 + char *rolename;
58713 + uid_t uidgid;
58714 + __u16 roletype;
58715 +
58716 + __u16 auth_attempts;
58717 + unsigned long expires;
58718 +
58719 + struct acl_subject_label *root_label;
58720 + struct gr_hash_struct *hash;
58721 +
58722 + struct acl_role_label *prev;
58723 + struct acl_role_label *next;
58724 +
58725 + struct role_transition *transitions;
58726 + struct role_allowed_ip *allowed_ips;
58727 + uid_t *domain_children;
58728 + __u16 domain_child_num;
58729 +
58730 + umode_t umask;
58731 +
58732 + struct acl_subject_label **subj_hash;
58733 + __u32 subj_hash_size;
58734 +};
58735 +
58736 +struct user_acl_role_db {
58737 + struct acl_role_label **r_table;
58738 + __u32 num_pointers; /* Number of allocations to track */
58739 + __u32 num_roles; /* Number of roles */
58740 + __u32 num_domain_children; /* Number of domain children */
58741 + __u32 num_subjects; /* Number of subjects */
58742 + __u32 num_objects; /* Number of objects */
58743 +};
58744 +
58745 +struct acl_object_label {
58746 + char *filename;
58747 + ino_t inode;
58748 + dev_t device;
58749 + __u32 mode;
58750 +
58751 + struct acl_subject_label *nested;
58752 + struct acl_object_label *globbed;
58753 +
58754 + /* next two structures not used */
58755 +
58756 + struct acl_object_label *prev;
58757 + struct acl_object_label *next;
58758 +};
58759 +
58760 +struct acl_ip_label {
58761 + char *iface;
58762 + __u32 addr;
58763 + __u32 netmask;
58764 + __u16 low, high;
58765 + __u8 mode;
58766 + __u32 type;
58767 + __u32 proto[8];
58768 +
58769 + /* next two structures not used */
58770 +
58771 + struct acl_ip_label *prev;
58772 + struct acl_ip_label *next;
58773 +};
58774 +
58775 +struct gr_arg {
58776 + struct user_acl_role_db role_db;
58777 + unsigned char pw[GR_PW_LEN];
58778 + unsigned char salt[GR_SALT_LEN];
58779 + unsigned char sum[GR_SHA_LEN];
58780 + unsigned char sp_role[GR_SPROLE_LEN];
58781 + struct sprole_pw *sprole_pws;
58782 + dev_t segv_device;
58783 + ino_t segv_inode;
58784 + uid_t segv_uid;
58785 + __u16 num_sprole_pws;
58786 + __u16 mode;
58787 +};
58788 +
58789 +struct gr_arg_wrapper {
58790 + struct gr_arg *arg;
58791 + __u32 version;
58792 + __u32 size;
58793 +};
58794 +
58795 +struct subject_map {
58796 + struct acl_subject_label *user;
58797 + struct acl_subject_label *kernel;
58798 + struct subject_map *prev;
58799 + struct subject_map *next;
58800 +};
58801 +
58802 +struct acl_subj_map_db {
58803 + struct subject_map **s_hash;
58804 + __u32 s_size;
58805 +};
58806 +
58807 +/* End Data Structures Section */
58808 +
58809 +/* Hash functions generated by empirical testing by Brad Spengler
58810 + Makes good use of the low bits of the inode. Generally 0-1 times
58811 + in loop for successful match. 0-3 for unsuccessful match.
58812 + Shift/add algorithm with modulus of table size and an XOR*/
58813 +
58814 +static __inline__ unsigned int
58815 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58816 +{
58817 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58818 +}
58819 +
58820 + static __inline__ unsigned int
58821 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58822 +{
58823 + return ((const unsigned long)userp % sz);
58824 +}
58825 +
58826 +static __inline__ unsigned int
58827 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58828 +{
58829 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58830 +}
58831 +
58832 +static __inline__ unsigned int
58833 +nhash(const char *name, const __u16 len, const unsigned int sz)
58834 +{
58835 + return full_name_hash((const unsigned char *)name, len) % sz;
58836 +}
58837 +
58838 +#define FOR_EACH_ROLE_START(role) \
58839 + role = role_list; \
58840 + while (role) {
58841 +
58842 +#define FOR_EACH_ROLE_END(role) \
58843 + role = role->prev; \
58844 + }
58845 +
58846 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58847 + subj = NULL; \
58848 + iter = 0; \
58849 + while (iter < role->subj_hash_size) { \
58850 + if (subj == NULL) \
58851 + subj = role->subj_hash[iter]; \
58852 + if (subj == NULL) { \
58853 + iter++; \
58854 + continue; \
58855 + }
58856 +
58857 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58858 + subj = subj->next; \
58859 + if (subj == NULL) \
58860 + iter++; \
58861 + }
58862 +
58863 +
58864 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58865 + subj = role->hash->first; \
58866 + while (subj != NULL) {
58867 +
58868 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58869 + subj = subj->next; \
58870 + }
58871 +
58872 +#endif
58873 +
58874 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58875 new file mode 100644
58876 index 0000000..323ecf2
58877 --- /dev/null
58878 +++ b/include/linux/gralloc.h
58879 @@ -0,0 +1,9 @@
58880 +#ifndef __GRALLOC_H
58881 +#define __GRALLOC_H
58882 +
58883 +void acl_free_all(void);
58884 +int acl_alloc_stack_init(unsigned long size);
58885 +void *acl_alloc(unsigned long len);
58886 +void *acl_alloc_num(unsigned long num, unsigned long len);
58887 +
58888 +#endif
58889 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58890 new file mode 100644
58891 index 0000000..b30e9bc
58892 --- /dev/null
58893 +++ b/include/linux/grdefs.h
58894 @@ -0,0 +1,140 @@
58895 +#ifndef GRDEFS_H
58896 +#define GRDEFS_H
58897 +
58898 +/* Begin grsecurity status declarations */
58899 +
58900 +enum {
58901 + GR_READY = 0x01,
58902 + GR_STATUS_INIT = 0x00 // disabled state
58903 +};
58904 +
58905 +/* Begin ACL declarations */
58906 +
58907 +/* Role flags */
58908 +
58909 +enum {
58910 + GR_ROLE_USER = 0x0001,
58911 + GR_ROLE_GROUP = 0x0002,
58912 + GR_ROLE_DEFAULT = 0x0004,
58913 + GR_ROLE_SPECIAL = 0x0008,
58914 + GR_ROLE_AUTH = 0x0010,
58915 + GR_ROLE_NOPW = 0x0020,
58916 + GR_ROLE_GOD = 0x0040,
58917 + GR_ROLE_LEARN = 0x0080,
58918 + GR_ROLE_TPE = 0x0100,
58919 + GR_ROLE_DOMAIN = 0x0200,
58920 + GR_ROLE_PAM = 0x0400,
58921 + GR_ROLE_PERSIST = 0x0800
58922 +};
58923 +
58924 +/* ACL Subject and Object mode flags */
58925 +enum {
58926 + GR_DELETED = 0x80000000
58927 +};
58928 +
58929 +/* ACL Object-only mode flags */
58930 +enum {
58931 + GR_READ = 0x00000001,
58932 + GR_APPEND = 0x00000002,
58933 + GR_WRITE = 0x00000004,
58934 + GR_EXEC = 0x00000008,
58935 + GR_FIND = 0x00000010,
58936 + GR_INHERIT = 0x00000020,
58937 + GR_SETID = 0x00000040,
58938 + GR_CREATE = 0x00000080,
58939 + GR_DELETE = 0x00000100,
58940 + GR_LINK = 0x00000200,
58941 + GR_AUDIT_READ = 0x00000400,
58942 + GR_AUDIT_APPEND = 0x00000800,
58943 + GR_AUDIT_WRITE = 0x00001000,
58944 + GR_AUDIT_EXEC = 0x00002000,
58945 + GR_AUDIT_FIND = 0x00004000,
58946 + GR_AUDIT_INHERIT= 0x00008000,
58947 + GR_AUDIT_SETID = 0x00010000,
58948 + GR_AUDIT_CREATE = 0x00020000,
58949 + GR_AUDIT_DELETE = 0x00040000,
58950 + GR_AUDIT_LINK = 0x00080000,
58951 + GR_PTRACERD = 0x00100000,
58952 + GR_NOPTRACE = 0x00200000,
58953 + GR_SUPPRESS = 0x00400000,
58954 + GR_NOLEARN = 0x00800000,
58955 + GR_INIT_TRANSFER= 0x01000000
58956 +};
58957 +
58958 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58959 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58960 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58961 +
58962 +/* ACL subject-only mode flags */
58963 +enum {
58964 + GR_KILL = 0x00000001,
58965 + GR_VIEW = 0x00000002,
58966 + GR_PROTECTED = 0x00000004,
58967 + GR_LEARN = 0x00000008,
58968 + GR_OVERRIDE = 0x00000010,
58969 + /* just a placeholder, this mode is only used in userspace */
58970 + GR_DUMMY = 0x00000020,
58971 + GR_PROTSHM = 0x00000040,
58972 + GR_KILLPROC = 0x00000080,
58973 + GR_KILLIPPROC = 0x00000100,
58974 + /* just a placeholder, this mode is only used in userspace */
58975 + GR_NOTROJAN = 0x00000200,
58976 + GR_PROTPROCFD = 0x00000400,
58977 + GR_PROCACCT = 0x00000800,
58978 + GR_RELAXPTRACE = 0x00001000,
58979 + GR_NESTED = 0x00002000,
58980 + GR_INHERITLEARN = 0x00004000,
58981 + GR_PROCFIND = 0x00008000,
58982 + GR_POVERRIDE = 0x00010000,
58983 + GR_KERNELAUTH = 0x00020000,
58984 + GR_ATSECURE = 0x00040000,
58985 + GR_SHMEXEC = 0x00080000
58986 +};
58987 +
58988 +enum {
58989 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58990 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58991 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58992 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58993 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58994 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58995 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58996 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58997 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58998 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58999 +};
59000 +
59001 +enum {
59002 + GR_ID_USER = 0x01,
59003 + GR_ID_GROUP = 0x02,
59004 +};
59005 +
59006 +enum {
59007 + GR_ID_ALLOW = 0x01,
59008 + GR_ID_DENY = 0x02,
59009 +};
59010 +
59011 +#define GR_CRASH_RES 31
59012 +#define GR_UIDTABLE_MAX 500
59013 +
59014 +/* begin resource learning section */
59015 +enum {
59016 + GR_RLIM_CPU_BUMP = 60,
59017 + GR_RLIM_FSIZE_BUMP = 50000,
59018 + GR_RLIM_DATA_BUMP = 10000,
59019 + GR_RLIM_STACK_BUMP = 1000,
59020 + GR_RLIM_CORE_BUMP = 10000,
59021 + GR_RLIM_RSS_BUMP = 500000,
59022 + GR_RLIM_NPROC_BUMP = 1,
59023 + GR_RLIM_NOFILE_BUMP = 5,
59024 + GR_RLIM_MEMLOCK_BUMP = 50000,
59025 + GR_RLIM_AS_BUMP = 500000,
59026 + GR_RLIM_LOCKS_BUMP = 2,
59027 + GR_RLIM_SIGPENDING_BUMP = 5,
59028 + GR_RLIM_MSGQUEUE_BUMP = 10000,
59029 + GR_RLIM_NICE_BUMP = 1,
59030 + GR_RLIM_RTPRIO_BUMP = 1,
59031 + GR_RLIM_RTTIME_BUMP = 1000000
59032 +};
59033 +
59034 +#endif
59035 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
59036 new file mode 100644
59037 index 0000000..da390f1
59038 --- /dev/null
59039 +++ b/include/linux/grinternal.h
59040 @@ -0,0 +1,221 @@
59041 +#ifndef __GRINTERNAL_H
59042 +#define __GRINTERNAL_H
59043 +
59044 +#ifdef CONFIG_GRKERNSEC
59045 +
59046 +#include <linux/fs.h>
59047 +#include <linux/mnt_namespace.h>
59048 +#include <linux/nsproxy.h>
59049 +#include <linux/gracl.h>
59050 +#include <linux/grdefs.h>
59051 +#include <linux/grmsg.h>
59052 +
59053 +void gr_add_learn_entry(const char *fmt, ...)
59054 + __attribute__ ((format (printf, 1, 2)));
59055 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59056 + const struct vfsmount *mnt);
59057 +__u32 gr_check_create(const struct dentry *new_dentry,
59058 + const struct dentry *parent,
59059 + const struct vfsmount *mnt, const __u32 mode);
59060 +int gr_check_protected_task(const struct task_struct *task);
59061 +__u32 to_gr_audit(const __u32 reqmode);
59062 +int gr_set_acls(const int type);
59063 +int gr_apply_subject_to_task(struct task_struct *task);
59064 +int gr_acl_is_enabled(void);
59065 +char gr_roletype_to_char(void);
59066 +
59067 +void gr_handle_alertkill(struct task_struct *task);
59068 +char *gr_to_filename(const struct dentry *dentry,
59069 + const struct vfsmount *mnt);
59070 +char *gr_to_filename1(const struct dentry *dentry,
59071 + const struct vfsmount *mnt);
59072 +char *gr_to_filename2(const struct dentry *dentry,
59073 + const struct vfsmount *mnt);
59074 +char *gr_to_filename3(const struct dentry *dentry,
59075 + const struct vfsmount *mnt);
59076 +
59077 +extern int grsec_enable_ptrace_readexec;
59078 +extern int grsec_enable_harden_ptrace;
59079 +extern int grsec_enable_link;
59080 +extern int grsec_enable_fifo;
59081 +extern int grsec_enable_execve;
59082 +extern int grsec_enable_shm;
59083 +extern int grsec_enable_execlog;
59084 +extern int grsec_enable_signal;
59085 +extern int grsec_enable_audit_ptrace;
59086 +extern int grsec_enable_forkfail;
59087 +extern int grsec_enable_time;
59088 +extern int grsec_enable_rofs;
59089 +extern int grsec_enable_chroot_shmat;
59090 +extern int grsec_enable_chroot_mount;
59091 +extern int grsec_enable_chroot_double;
59092 +extern int grsec_enable_chroot_pivot;
59093 +extern int grsec_enable_chroot_chdir;
59094 +extern int grsec_enable_chroot_chmod;
59095 +extern int grsec_enable_chroot_mknod;
59096 +extern int grsec_enable_chroot_fchdir;
59097 +extern int grsec_enable_chroot_nice;
59098 +extern int grsec_enable_chroot_execlog;
59099 +extern int grsec_enable_chroot_caps;
59100 +extern int grsec_enable_chroot_sysctl;
59101 +extern int grsec_enable_chroot_unix;
59102 +extern int grsec_enable_tpe;
59103 +extern int grsec_tpe_gid;
59104 +extern int grsec_enable_tpe_all;
59105 +extern int grsec_enable_tpe_invert;
59106 +extern int grsec_enable_socket_all;
59107 +extern int grsec_socket_all_gid;
59108 +extern int grsec_enable_socket_client;
59109 +extern int grsec_socket_client_gid;
59110 +extern int grsec_enable_socket_server;
59111 +extern int grsec_socket_server_gid;
59112 +extern int grsec_audit_gid;
59113 +extern int grsec_enable_group;
59114 +extern int grsec_enable_audit_textrel;
59115 +extern int grsec_enable_log_rwxmaps;
59116 +extern int grsec_enable_mount;
59117 +extern int grsec_enable_chdir;
59118 +extern int grsec_resource_logging;
59119 +extern int grsec_enable_blackhole;
59120 +extern int grsec_lastack_retries;
59121 +extern int grsec_enable_brute;
59122 +extern int grsec_lock;
59123 +
59124 +extern spinlock_t grsec_alert_lock;
59125 +extern unsigned long grsec_alert_wtime;
59126 +extern unsigned long grsec_alert_fyet;
59127 +
59128 +extern spinlock_t grsec_audit_lock;
59129 +
59130 +extern rwlock_t grsec_exec_file_lock;
59131 +
59132 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59133 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59134 + (tsk)->exec_file->f_vfsmnt) : "/")
59135 +
59136 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59137 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59138 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59139 +
59140 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59141 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
59142 + (tsk)->exec_file->f_vfsmnt) : "/")
59143 +
59144 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59145 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59146 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59147 +
59148 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59149 +
59150 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59151 +
59152 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59153 + (task)->pid, (cred)->uid, \
59154 + (cred)->euid, (cred)->gid, (cred)->egid, \
59155 + gr_parent_task_fullpath(task), \
59156 + (task)->real_parent->comm, (task)->real_parent->pid, \
59157 + (pcred)->uid, (pcred)->euid, \
59158 + (pcred)->gid, (pcred)->egid
59159 +
59160 +#define GR_CHROOT_CAPS {{ \
59161 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59162 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59163 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59164 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59165 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59166 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
59167 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
59168 +
59169 +#define security_learn(normal_msg,args...) \
59170 +({ \
59171 + read_lock(&grsec_exec_file_lock); \
59172 + gr_add_learn_entry(normal_msg "\n", ## args); \
59173 + read_unlock(&grsec_exec_file_lock); \
59174 +})
59175 +
59176 +enum {
59177 + GR_DO_AUDIT,
59178 + GR_DONT_AUDIT,
59179 + /* used for non-audit messages that we shouldn't kill the task on */
59180 + GR_DONT_AUDIT_GOOD
59181 +};
59182 +
59183 +enum {
59184 + GR_TTYSNIFF,
59185 + GR_RBAC,
59186 + GR_RBAC_STR,
59187 + GR_STR_RBAC,
59188 + GR_RBAC_MODE2,
59189 + GR_RBAC_MODE3,
59190 + GR_FILENAME,
59191 + GR_SYSCTL_HIDDEN,
59192 + GR_NOARGS,
59193 + GR_ONE_INT,
59194 + GR_ONE_INT_TWO_STR,
59195 + GR_ONE_STR,
59196 + GR_STR_INT,
59197 + GR_TWO_STR_INT,
59198 + GR_TWO_INT,
59199 + GR_TWO_U64,
59200 + GR_THREE_INT,
59201 + GR_FIVE_INT_TWO_STR,
59202 + GR_TWO_STR,
59203 + GR_THREE_STR,
59204 + GR_FOUR_STR,
59205 + GR_STR_FILENAME,
59206 + GR_FILENAME_STR,
59207 + GR_FILENAME_TWO_INT,
59208 + GR_FILENAME_TWO_INT_STR,
59209 + GR_TEXTREL,
59210 + GR_PTRACE,
59211 + GR_RESOURCE,
59212 + GR_CAP,
59213 + GR_SIG,
59214 + GR_SIG2,
59215 + GR_CRASH1,
59216 + GR_CRASH2,
59217 + GR_PSACCT,
59218 + GR_RWXMAP
59219 +};
59220 +
59221 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59222 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59223 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59224 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59225 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59226 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59227 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59228 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59229 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59230 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59231 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59232 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59233 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59234 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59235 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59236 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59237 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59238 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59239 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59240 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59241 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59242 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59243 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59244 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59245 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59246 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59247 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59248 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59249 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59250 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59251 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59252 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59253 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59254 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59255 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59256 +
59257 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59258 +
59259 +#endif
59260 +
59261 +#endif
59262 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59263 new file mode 100644
59264 index 0000000..f885406
59265 --- /dev/null
59266 +++ b/include/linux/grmsg.h
59267 @@ -0,0 +1,109 @@
59268 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59269 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59270 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59271 +#define GR_STOPMOD_MSG "denied modification of module state by "
59272 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59273 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59274 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59275 +#define GR_IOPL_MSG "denied use of iopl() by "
59276 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59277 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59278 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59279 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59280 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59281 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59282 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59283 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59284 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59285 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59286 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59287 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59288 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59289 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59290 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59291 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59292 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59293 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59294 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59295 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59296 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59297 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59298 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59299 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59300 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59301 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59302 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59303 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59304 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59305 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59306 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59307 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59308 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59309 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59310 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59311 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59312 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59313 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59314 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59315 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59316 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59317 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59318 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59319 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59320 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59321 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59322 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59323 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59324 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59325 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59326 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59327 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59328 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59329 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59330 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59331 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59332 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59333 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59334 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59335 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59336 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59337 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59338 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59339 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59340 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59341 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59342 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59343 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59344 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59345 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59346 +#define GR_TIME_MSG "time set by "
59347 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59348 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59349 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59350 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59351 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59352 +#define GR_BIND_MSG "denied bind() by "
59353 +#define GR_CONNECT_MSG "denied connect() by "
59354 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59355 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59356 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59357 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59358 +#define GR_CAP_ACL_MSG "use of %s denied for "
59359 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59360 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59361 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59362 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59363 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59364 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59365 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59366 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59367 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59368 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59369 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59370 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59371 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59372 +#define GR_VM86_MSG "denied use of vm86 by "
59373 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59374 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59375 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59376 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59377 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59378 new file mode 100644
59379 index 0000000..2ccf677
59380 --- /dev/null
59381 +++ b/include/linux/grsecurity.h
59382 @@ -0,0 +1,229 @@
59383 +#ifndef GR_SECURITY_H
59384 +#define GR_SECURITY_H
59385 +#include <linux/fs.h>
59386 +#include <linux/fs_struct.h>
59387 +#include <linux/binfmts.h>
59388 +#include <linux/gracl.h>
59389 +
59390 +/* notify of brain-dead configs */
59391 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59392 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59393 +#endif
59394 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59395 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59396 +#endif
59397 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59398 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59399 +#endif
59400 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59401 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59402 +#endif
59403 +
59404 +#include <linux/compat.h>
59405 +
59406 +struct user_arg_ptr {
59407 +#ifdef CONFIG_COMPAT
59408 + bool is_compat;
59409 +#endif
59410 + union {
59411 + const char __user *const __user *native;
59412 +#ifdef CONFIG_COMPAT
59413 + compat_uptr_t __user *compat;
59414 +#endif
59415 + } ptr;
59416 +};
59417 +
59418 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59419 +void gr_handle_brute_check(void);
59420 +void gr_handle_kernel_exploit(void);
59421 +int gr_process_user_ban(void);
59422 +
59423 +char gr_roletype_to_char(void);
59424 +
59425 +int gr_acl_enable_at_secure(void);
59426 +
59427 +int gr_check_user_change(int real, int effective, int fs);
59428 +int gr_check_group_change(int real, int effective, int fs);
59429 +
59430 +void gr_del_task_from_ip_table(struct task_struct *p);
59431 +
59432 +int gr_pid_is_chrooted(struct task_struct *p);
59433 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59434 +int gr_handle_chroot_nice(void);
59435 +int gr_handle_chroot_sysctl(const int op);
59436 +int gr_handle_chroot_setpriority(struct task_struct *p,
59437 + const int niceval);
59438 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59439 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59440 + const struct vfsmount *mnt);
59441 +void gr_handle_chroot_chdir(struct path *path);
59442 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59443 + const struct vfsmount *mnt, const int mode);
59444 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59445 + const struct vfsmount *mnt, const int mode);
59446 +int gr_handle_chroot_mount(const struct dentry *dentry,
59447 + const struct vfsmount *mnt,
59448 + const char *dev_name);
59449 +int gr_handle_chroot_pivot(void);
59450 +int gr_handle_chroot_unix(const pid_t pid);
59451 +
59452 +int gr_handle_rawio(const struct inode *inode);
59453 +
59454 +void gr_handle_ioperm(void);
59455 +void gr_handle_iopl(void);
59456 +
59457 +umode_t gr_acl_umask(void);
59458 +
59459 +int gr_tpe_allow(const struct file *file);
59460 +
59461 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59462 +void gr_clear_chroot_entries(struct task_struct *task);
59463 +
59464 +void gr_log_forkfail(const int retval);
59465 +void gr_log_timechange(void);
59466 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59467 +void gr_log_chdir(const struct dentry *dentry,
59468 + const struct vfsmount *mnt);
59469 +void gr_log_chroot_exec(const struct dentry *dentry,
59470 + const struct vfsmount *mnt);
59471 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59472 +void gr_log_remount(const char *devname, const int retval);
59473 +void gr_log_unmount(const char *devname, const int retval);
59474 +void gr_log_mount(const char *from, const char *to, const int retval);
59475 +void gr_log_textrel(struct vm_area_struct *vma);
59476 +void gr_log_rwxmmap(struct file *file);
59477 +void gr_log_rwxmprotect(struct file *file);
59478 +
59479 +int gr_handle_follow_link(const struct inode *parent,
59480 + const struct inode *inode,
59481 + const struct dentry *dentry,
59482 + const struct vfsmount *mnt);
59483 +int gr_handle_fifo(const struct dentry *dentry,
59484 + const struct vfsmount *mnt,
59485 + const struct dentry *dir, const int flag,
59486 + const int acc_mode);
59487 +int gr_handle_hardlink(const struct dentry *dentry,
59488 + const struct vfsmount *mnt,
59489 + struct inode *inode,
59490 + const int mode, const char *to);
59491 +
59492 +int gr_is_capable(const int cap);
59493 +int gr_is_capable_nolog(const int cap);
59494 +void gr_learn_resource(const struct task_struct *task, const int limit,
59495 + const unsigned long wanted, const int gt);
59496 +void gr_copy_label(struct task_struct *tsk);
59497 +void gr_handle_crash(struct task_struct *task, const int sig);
59498 +int gr_handle_signal(const struct task_struct *p, const int sig);
59499 +int gr_check_crash_uid(const uid_t uid);
59500 +int gr_check_protected_task(const struct task_struct *task);
59501 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59502 +int gr_acl_handle_mmap(const struct file *file,
59503 + const unsigned long prot);
59504 +int gr_acl_handle_mprotect(const struct file *file,
59505 + const unsigned long prot);
59506 +int gr_check_hidden_task(const struct task_struct *tsk);
59507 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59508 + const struct vfsmount *mnt);
59509 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59510 + const struct vfsmount *mnt);
59511 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59512 + const struct vfsmount *mnt, const int fmode);
59513 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59514 + const struct vfsmount *mnt, umode_t *mode);
59515 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59516 + const struct vfsmount *mnt);
59517 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59518 + const struct vfsmount *mnt);
59519 +int gr_handle_ptrace(struct task_struct *task, const long request);
59520 +int gr_handle_proc_ptrace(struct task_struct *task);
59521 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59522 + const struct vfsmount *mnt);
59523 +int gr_check_crash_exec(const struct file *filp);
59524 +int gr_acl_is_enabled(void);
59525 +void gr_set_kernel_label(struct task_struct *task);
59526 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59527 + const gid_t gid);
59528 +int gr_set_proc_label(const struct dentry *dentry,
59529 + const struct vfsmount *mnt,
59530 + const int unsafe_flags);
59531 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59532 + const struct vfsmount *mnt);
59533 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59534 + const struct vfsmount *mnt, int acc_mode);
59535 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59536 + const struct dentry *p_dentry,
59537 + const struct vfsmount *p_mnt,
59538 + int open_flags, int acc_mode, const int imode);
59539 +void gr_handle_create(const struct dentry *dentry,
59540 + const struct vfsmount *mnt);
59541 +void gr_handle_proc_create(const struct dentry *dentry,
59542 + const struct inode *inode);
59543 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59544 + const struct dentry *parent_dentry,
59545 + const struct vfsmount *parent_mnt,
59546 + const int mode);
59547 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59548 + const struct dentry *parent_dentry,
59549 + const struct vfsmount *parent_mnt);
59550 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59551 + const struct vfsmount *mnt);
59552 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59553 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59554 + const struct vfsmount *mnt);
59555 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59556 + const struct dentry *parent_dentry,
59557 + const struct vfsmount *parent_mnt,
59558 + const char *from);
59559 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59560 + const struct dentry *parent_dentry,
59561 + const struct vfsmount *parent_mnt,
59562 + const struct dentry *old_dentry,
59563 + const struct vfsmount *old_mnt, const char *to);
59564 +int gr_acl_handle_rename(struct dentry *new_dentry,
59565 + struct dentry *parent_dentry,
59566 + const struct vfsmount *parent_mnt,
59567 + struct dentry *old_dentry,
59568 + struct inode *old_parent_inode,
59569 + struct vfsmount *old_mnt, const char *newname);
59570 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59571 + struct dentry *old_dentry,
59572 + struct dentry *new_dentry,
59573 + struct vfsmount *mnt, const __u8 replace);
59574 +__u32 gr_check_link(const struct dentry *new_dentry,
59575 + const struct dentry *parent_dentry,
59576 + const struct vfsmount *parent_mnt,
59577 + const struct dentry *old_dentry,
59578 + const struct vfsmount *old_mnt);
59579 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59580 + const unsigned int namelen, const ino_t ino);
59581 +
59582 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59583 + const struct vfsmount *mnt);
59584 +void gr_acl_handle_exit(void);
59585 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59586 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59587 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59588 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59589 +void gr_audit_ptrace(struct task_struct *task);
59590 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59591 +
59592 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59593 +
59594 +#ifdef CONFIG_GRKERNSEC
59595 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59596 +void gr_handle_vm86(void);
59597 +void gr_handle_mem_readwrite(u64 from, u64 to);
59598 +
59599 +void gr_log_badprocpid(const char *entry);
59600 +
59601 +extern int grsec_enable_dmesg;
59602 +extern int grsec_disable_privio;
59603 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59604 +extern int grsec_enable_chroot_findtask;
59605 +#endif
59606 +#ifdef CONFIG_GRKERNSEC_SETXID
59607 +extern int grsec_enable_setxid;
59608 +#endif
59609 +#endif
59610 +
59611 +#endif
59612 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59613 new file mode 100644
59614 index 0000000..e7ffaaf
59615 --- /dev/null
59616 +++ b/include/linux/grsock.h
59617 @@ -0,0 +1,19 @@
59618 +#ifndef __GRSOCK_H
59619 +#define __GRSOCK_H
59620 +
59621 +extern void gr_attach_curr_ip(const struct sock *sk);
59622 +extern int gr_handle_sock_all(const int family, const int type,
59623 + const int protocol);
59624 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59625 +extern int gr_handle_sock_server_other(const struct sock *sck);
59626 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59627 +extern int gr_search_connect(struct socket * sock,
59628 + struct sockaddr_in * addr);
59629 +extern int gr_search_bind(struct socket * sock,
59630 + struct sockaddr_in * addr);
59631 +extern int gr_search_listen(struct socket * sock);
59632 +extern int gr_search_accept(struct socket * sock);
59633 +extern int gr_search_socket(const int domain, const int type,
59634 + const int protocol);
59635 +
59636 +#endif
59637 diff --git a/include/linux/hid.h b/include/linux/hid.h
59638 index c235e4e..f0cf7a0 100644
59639 --- a/include/linux/hid.h
59640 +++ b/include/linux/hid.h
59641 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59642 unsigned int code, int value);
59643
59644 int (*parse)(struct hid_device *hdev);
59645 -};
59646 +} __no_const;
59647
59648 #define PM_HINT_FULLON 1<<5
59649 #define PM_HINT_NORMAL 1<<1
59650 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59651 index 3a93f73..b19d0b3 100644
59652 --- a/include/linux/highmem.h
59653 +++ b/include/linux/highmem.h
59654 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59655 kunmap_atomic(kaddr, KM_USER0);
59656 }
59657
59658 +static inline void sanitize_highpage(struct page *page)
59659 +{
59660 + void *kaddr;
59661 + unsigned long flags;
59662 +
59663 + local_irq_save(flags);
59664 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59665 + clear_page(kaddr);
59666 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59667 + local_irq_restore(flags);
59668 +}
59669 +
59670 static inline void zero_user_segments(struct page *page,
59671 unsigned start1, unsigned end1,
59672 unsigned start2, unsigned end2)
59673 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59674 index 07d103a..04ec65b 100644
59675 --- a/include/linux/i2c.h
59676 +++ b/include/linux/i2c.h
59677 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59678 /* To determine what the adapter supports */
59679 u32 (*functionality) (struct i2c_adapter *);
59680 };
59681 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59682
59683 /*
59684 * i2c_adapter is the structure used to identify a physical i2c bus along
59685 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59686 index a6deef4..c56a7f2 100644
59687 --- a/include/linux/i2o.h
59688 +++ b/include/linux/i2o.h
59689 @@ -564,7 +564,7 @@ struct i2o_controller {
59690 struct i2o_device *exec; /* Executive */
59691 #if BITS_PER_LONG == 64
59692 spinlock_t context_list_lock; /* lock for context_list */
59693 - atomic_t context_list_counter; /* needed for unique contexts */
59694 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59695 struct list_head context_list; /* list of context id's
59696 and pointers */
59697 #endif
59698 diff --git a/include/linux/init.h b/include/linux/init.h
59699 index 9146f39..885354d 100644
59700 --- a/include/linux/init.h
59701 +++ b/include/linux/init.h
59702 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59703
59704 /* Each module must use one module_init(). */
59705 #define module_init(initfn) \
59706 - static inline initcall_t __inittest(void) \
59707 + static inline __used initcall_t __inittest(void) \
59708 { return initfn; } \
59709 int init_module(void) __attribute__((alias(#initfn)));
59710
59711 /* This is only required if you want to be unloadable. */
59712 #define module_exit(exitfn) \
59713 - static inline exitcall_t __exittest(void) \
59714 + static inline __used exitcall_t __exittest(void) \
59715 { return exitfn; } \
59716 void cleanup_module(void) __attribute__((alias(#exitfn)));
59717
59718 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59719 index 32574ee..00d4ef1 100644
59720 --- a/include/linux/init_task.h
59721 +++ b/include/linux/init_task.h
59722 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59723
59724 #define INIT_TASK_COMM "swapper"
59725
59726 +#ifdef CONFIG_X86
59727 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59728 +#else
59729 +#define INIT_TASK_THREAD_INFO
59730 +#endif
59731 +
59732 /*
59733 * INIT_TASK is used to set up the first task table, touch at
59734 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59735 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59736 RCU_INIT_POINTER(.cred, &init_cred), \
59737 .comm = INIT_TASK_COMM, \
59738 .thread = INIT_THREAD, \
59739 + INIT_TASK_THREAD_INFO \
59740 .fs = &init_fs, \
59741 .files = &init_files, \
59742 .signal = &init_signals, \
59743 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59744 index e6ca56d..8583707 100644
59745 --- a/include/linux/intel-iommu.h
59746 +++ b/include/linux/intel-iommu.h
59747 @@ -296,7 +296,7 @@ struct iommu_flush {
59748 u8 fm, u64 type);
59749 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59750 unsigned int size_order, u64 type);
59751 -};
59752 +} __no_const;
59753
59754 enum {
59755 SR_DMAR_FECTL_REG,
59756 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59757 index a64b00e..464d8bc 100644
59758 --- a/include/linux/interrupt.h
59759 +++ b/include/linux/interrupt.h
59760 @@ -441,7 +441,7 @@ enum
59761 /* map softirq index to softirq name. update 'softirq_to_name' in
59762 * kernel/softirq.c when adding a new softirq.
59763 */
59764 -extern char *softirq_to_name[NR_SOFTIRQS];
59765 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59766
59767 /* softirq mask and active fields moved to irq_cpustat_t in
59768 * asm/hardirq.h to get better cache usage. KAO
59769 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59770
59771 struct softirq_action
59772 {
59773 - void (*action)(struct softirq_action *);
59774 + void (*action)(void);
59775 };
59776
59777 asmlinkage void do_softirq(void);
59778 asmlinkage void __do_softirq(void);
59779 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59780 +extern void open_softirq(int nr, void (*action)(void));
59781 extern void softirq_init(void);
59782 static inline void __raise_softirq_irqoff(unsigned int nr)
59783 {
59784 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59785 index 3875719..4cd454c 100644
59786 --- a/include/linux/kallsyms.h
59787 +++ b/include/linux/kallsyms.h
59788 @@ -15,7 +15,8 @@
59789
59790 struct module;
59791
59792 -#ifdef CONFIG_KALLSYMS
59793 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59794 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59795 /* Lookup the address for a symbol. Returns 0 if not found. */
59796 unsigned long kallsyms_lookup_name(const char *name);
59797
59798 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59799 /* Stupid that this does nothing, but I didn't create this mess. */
59800 #define __print_symbol(fmt, addr)
59801 #endif /*CONFIG_KALLSYMS*/
59802 +#else /* when included by kallsyms.c, vsnprintf.c, or
59803 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59804 +extern void __print_symbol(const char *fmt, unsigned long address);
59805 +extern int sprint_backtrace(char *buffer, unsigned long address);
59806 +extern int sprint_symbol(char *buffer, unsigned long address);
59807 +const char *kallsyms_lookup(unsigned long addr,
59808 + unsigned long *symbolsize,
59809 + unsigned long *offset,
59810 + char **modname, char *namebuf);
59811 +#endif
59812
59813 /* This macro allows us to keep printk typechecking */
59814 static __printf(1, 2)
59815 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59816 index fa39183..40160be 100644
59817 --- a/include/linux/kgdb.h
59818 +++ b/include/linux/kgdb.h
59819 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59820 extern int kgdb_io_module_registered;
59821
59822 extern atomic_t kgdb_setting_breakpoint;
59823 -extern atomic_t kgdb_cpu_doing_single_step;
59824 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59825
59826 extern struct task_struct *kgdb_usethread;
59827 extern struct task_struct *kgdb_contthread;
59828 @@ -251,7 +251,7 @@ struct kgdb_arch {
59829 void (*disable_hw_break)(struct pt_regs *regs);
59830 void (*remove_all_hw_break)(void);
59831 void (*correct_hw_break)(void);
59832 -};
59833 +} __do_const;
59834
59835 /**
59836 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59837 @@ -276,7 +276,7 @@ struct kgdb_io {
59838 void (*pre_exception) (void);
59839 void (*post_exception) (void);
59840 int is_console;
59841 -};
59842 +} __do_const;
59843
59844 extern struct kgdb_arch arch_kgdb_ops;
59845
59846 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59847 index b16f653..eb908f4 100644
59848 --- a/include/linux/kmod.h
59849 +++ b/include/linux/kmod.h
59850 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59851 * usually useless though. */
59852 extern __printf(2, 3)
59853 int __request_module(bool wait, const char *name, ...);
59854 +extern __printf(3, 4)
59855 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59856 #define request_module(mod...) __request_module(true, mod)
59857 #define request_module_nowait(mod...) __request_module(false, mod)
59858 #define try_then_request_module(x, mod...) \
59859 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59860 index d526231..086e89b 100644
59861 --- a/include/linux/kvm_host.h
59862 +++ b/include/linux/kvm_host.h
59863 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59864 void vcpu_load(struct kvm_vcpu *vcpu);
59865 void vcpu_put(struct kvm_vcpu *vcpu);
59866
59867 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59868 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59869 struct module *module);
59870 void kvm_exit(void);
59871
59872 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59873 struct kvm_guest_debug *dbg);
59874 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59875
59876 -int kvm_arch_init(void *opaque);
59877 +int kvm_arch_init(const void *opaque);
59878 void kvm_arch_exit(void);
59879
59880 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59881 diff --git a/include/linux/libata.h b/include/linux/libata.h
59882 index cafc09a..d7e7829 100644
59883 --- a/include/linux/libata.h
59884 +++ b/include/linux/libata.h
59885 @@ -909,7 +909,7 @@ struct ata_port_operations {
59886 * fields must be pointers.
59887 */
59888 const struct ata_port_operations *inherits;
59889 -};
59890 +} __do_const;
59891
59892 struct ata_port_info {
59893 unsigned long flags;
59894 diff --git a/include/linux/mca.h b/include/linux/mca.h
59895 index 3797270..7765ede 100644
59896 --- a/include/linux/mca.h
59897 +++ b/include/linux/mca.h
59898 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59899 int region);
59900 void * (*mca_transform_memory)(struct mca_device *,
59901 void *memory);
59902 -};
59903 +} __no_const;
59904
59905 struct mca_bus {
59906 u64 default_dma_mask;
59907 diff --git a/include/linux/memory.h b/include/linux/memory.h
59908 index 935699b..11042cc 100644
59909 --- a/include/linux/memory.h
59910 +++ b/include/linux/memory.h
59911 @@ -144,7 +144,7 @@ struct memory_accessor {
59912 size_t count);
59913 ssize_t (*write)(struct memory_accessor *, const char *buf,
59914 off_t offset, size_t count);
59915 -};
59916 +} __no_const;
59917
59918 /*
59919 * Kernel text modification mutex, used for code patching. Users of this lock
59920 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59921 index 9970337..9444122 100644
59922 --- a/include/linux/mfd/abx500.h
59923 +++ b/include/linux/mfd/abx500.h
59924 @@ -188,6 +188,7 @@ struct abx500_ops {
59925 int (*event_registers_startup_state_get) (struct device *, u8 *);
59926 int (*startup_irq_enabled) (struct device *, unsigned int);
59927 };
59928 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59929
59930 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59931 void abx500_remove_ops(struct device *dev);
59932 diff --git a/include/linux/mm.h b/include/linux/mm.h
59933 index 4baadd1..2e0b45e 100644
59934 --- a/include/linux/mm.h
59935 +++ b/include/linux/mm.h
59936 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59937
59938 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59939 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59940 +
59941 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59942 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59943 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59944 +#else
59945 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59946 +#endif
59947 +
59948 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59949 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59950
59951 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59952 int set_page_dirty_lock(struct page *page);
59953 int clear_page_dirty_for_io(struct page *page);
59954
59955 -/* Is the vma a continuation of the stack vma above it? */
59956 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59957 -{
59958 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59959 -}
59960 -
59961 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59962 - unsigned long addr)
59963 -{
59964 - return (vma->vm_flags & VM_GROWSDOWN) &&
59965 - (vma->vm_start == addr) &&
59966 - !vma_growsdown(vma->vm_prev, addr);
59967 -}
59968 -
59969 -/* Is the vma a continuation of the stack vma below it? */
59970 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59971 -{
59972 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59973 -}
59974 -
59975 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59976 - unsigned long addr)
59977 -{
59978 - return (vma->vm_flags & VM_GROWSUP) &&
59979 - (vma->vm_end == addr) &&
59980 - !vma_growsup(vma->vm_next, addr);
59981 -}
59982 -
59983 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59984 unsigned long old_addr, struct vm_area_struct *new_vma,
59985 unsigned long new_addr, unsigned long len);
59986 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59987 }
59988 #endif
59989
59990 +#ifdef CONFIG_MMU
59991 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59992 +#else
59993 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59994 +{
59995 + return __pgprot(0);
59996 +}
59997 +#endif
59998 +
59999 int vma_wants_writenotify(struct vm_area_struct *vma);
60000
60001 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
60002 @@ -1419,6 +1407,7 @@ out:
60003 }
60004
60005 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
60006 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
60007
60008 extern unsigned long do_brk(unsigned long, unsigned long);
60009
60010 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
60011 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60012 struct vm_area_struct **pprev);
60013
60014 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60015 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60016 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60017 +
60018 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60019 NULL if none. Assume start_addr < end_addr. */
60020 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60021 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
60022 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60023 }
60024
60025 -#ifdef CONFIG_MMU
60026 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
60027 -#else
60028 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
60029 -{
60030 - return __pgprot(0);
60031 -}
60032 -#endif
60033 -
60034 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60035 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60036 unsigned long pfn, unsigned long size, pgprot_t);
60037 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
60038 extern int sysctl_memory_failure_early_kill;
60039 extern int sysctl_memory_failure_recovery;
60040 extern void shake_page(struct page *p, int access);
60041 -extern atomic_long_t mce_bad_pages;
60042 +extern atomic_long_unchecked_t mce_bad_pages;
60043 extern int soft_offline_page(struct page *page, int flags);
60044
60045 extern void dump_page(struct page *page);
60046 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
60047 unsigned int pages_per_huge_page);
60048 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
60049
60050 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60051 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60052 +#else
60053 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60054 +#endif
60055 +
60056 #endif /* __KERNEL__ */
60057 #endif /* _LINUX_MM_H */
60058 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
60059 index 5b42f1b..759e4b4 100644
60060 --- a/include/linux/mm_types.h
60061 +++ b/include/linux/mm_types.h
60062 @@ -253,6 +253,8 @@ struct vm_area_struct {
60063 #ifdef CONFIG_NUMA
60064 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60065 #endif
60066 +
60067 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60068 };
60069
60070 struct core_thread {
60071 @@ -389,6 +391,24 @@ struct mm_struct {
60072 #ifdef CONFIG_CPUMASK_OFFSTACK
60073 struct cpumask cpumask_allocation;
60074 #endif
60075 +
60076 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60077 + unsigned long pax_flags;
60078 +#endif
60079 +
60080 +#ifdef CONFIG_PAX_DLRESOLVE
60081 + unsigned long call_dl_resolve;
60082 +#endif
60083 +
60084 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60085 + unsigned long call_syscall;
60086 +#endif
60087 +
60088 +#ifdef CONFIG_PAX_ASLR
60089 + unsigned long delta_mmap; /* randomized offset */
60090 + unsigned long delta_stack; /* randomized offset */
60091 +#endif
60092 +
60093 };
60094
60095 static inline void mm_init_cpumask(struct mm_struct *mm)
60096 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
60097 index 1d1b1e1..2a13c78 100644
60098 --- a/include/linux/mmu_notifier.h
60099 +++ b/include/linux/mmu_notifier.h
60100 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
60101 */
60102 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60103 ({ \
60104 - pte_t __pte; \
60105 + pte_t ___pte; \
60106 struct vm_area_struct *___vma = __vma; \
60107 unsigned long ___address = __address; \
60108 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60109 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60110 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60111 - __pte; \
60112 + ___pte; \
60113 })
60114
60115 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
60116 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
60117 index 188cb2f..d78409b 100644
60118 --- a/include/linux/mmzone.h
60119 +++ b/include/linux/mmzone.h
60120 @@ -369,7 +369,7 @@ struct zone {
60121 unsigned long flags; /* zone flags, see below */
60122
60123 /* Zone statistics */
60124 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60125 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60126
60127 /*
60128 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
60129 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
60130 index 468819c..17b9db3 100644
60131 --- a/include/linux/mod_devicetable.h
60132 +++ b/include/linux/mod_devicetable.h
60133 @@ -12,7 +12,7 @@
60134 typedef unsigned long kernel_ulong_t;
60135 #endif
60136
60137 -#define PCI_ANY_ID (~0)
60138 +#define PCI_ANY_ID ((__u16)~0)
60139
60140 struct pci_device_id {
60141 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60142 @@ -131,7 +131,7 @@ struct usb_device_id {
60143 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60144 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60145
60146 -#define HID_ANY_ID (~0)
60147 +#define HID_ANY_ID (~0U)
60148
60149 struct hid_device_id {
60150 __u16 bus;
60151 diff --git a/include/linux/module.h b/include/linux/module.h
60152 index 3cb7839..511cb87 100644
60153 --- a/include/linux/module.h
60154 +++ b/include/linux/module.h
60155 @@ -17,6 +17,7 @@
60156 #include <linux/moduleparam.h>
60157 #include <linux/tracepoint.h>
60158 #include <linux/export.h>
60159 +#include <linux/fs.h>
60160
60161 #include <linux/percpu.h>
60162 #include <asm/module.h>
60163 @@ -261,19 +262,16 @@ struct module
60164 int (*init)(void);
60165
60166 /* If this is non-NULL, vfree after init() returns */
60167 - void *module_init;
60168 + void *module_init_rx, *module_init_rw;
60169
60170 /* Here is the actual code + data, vfree'd on unload. */
60171 - void *module_core;
60172 + void *module_core_rx, *module_core_rw;
60173
60174 /* Here are the sizes of the init and core sections */
60175 - unsigned int init_size, core_size;
60176 + unsigned int init_size_rw, core_size_rw;
60177
60178 /* The size of the executable code in each section. */
60179 - unsigned int init_text_size, core_text_size;
60180 -
60181 - /* Size of RO sections of the module (text+rodata) */
60182 - unsigned int init_ro_size, core_ro_size;
60183 + unsigned int init_size_rx, core_size_rx;
60184
60185 /* Arch-specific module values */
60186 struct mod_arch_specific arch;
60187 @@ -329,6 +327,10 @@ struct module
60188 #ifdef CONFIG_EVENT_TRACING
60189 struct ftrace_event_call **trace_events;
60190 unsigned int num_trace_events;
60191 + struct file_operations trace_id;
60192 + struct file_operations trace_enable;
60193 + struct file_operations trace_format;
60194 + struct file_operations trace_filter;
60195 #endif
60196 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60197 unsigned int num_ftrace_callsites;
60198 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
60199 bool is_module_percpu_address(unsigned long addr);
60200 bool is_module_text_address(unsigned long addr);
60201
60202 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60203 +{
60204 +
60205 +#ifdef CONFIG_PAX_KERNEXEC
60206 + if (ktla_ktva(addr) >= (unsigned long)start &&
60207 + ktla_ktva(addr) < (unsigned long)start + size)
60208 + return 1;
60209 +#endif
60210 +
60211 + return ((void *)addr >= start && (void *)addr < start + size);
60212 +}
60213 +
60214 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60215 +{
60216 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60217 +}
60218 +
60219 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60220 +{
60221 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60222 +}
60223 +
60224 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60225 +{
60226 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60227 +}
60228 +
60229 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60230 +{
60231 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60232 +}
60233 +
60234 static inline int within_module_core(unsigned long addr, struct module *mod)
60235 {
60236 - return (unsigned long)mod->module_core <= addr &&
60237 - addr < (unsigned long)mod->module_core + mod->core_size;
60238 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60239 }
60240
60241 static inline int within_module_init(unsigned long addr, struct module *mod)
60242 {
60243 - return (unsigned long)mod->module_init <= addr &&
60244 - addr < (unsigned long)mod->module_init + mod->init_size;
60245 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60246 }
60247
60248 /* Search for module by name: must hold module_mutex. */
60249 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60250 index b2be02e..6a9fdb1 100644
60251 --- a/include/linux/moduleloader.h
60252 +++ b/include/linux/moduleloader.h
60253 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60254 sections. Returns NULL on failure. */
60255 void *module_alloc(unsigned long size);
60256
60257 +#ifdef CONFIG_PAX_KERNEXEC
60258 +void *module_alloc_exec(unsigned long size);
60259 +#else
60260 +#define module_alloc_exec(x) module_alloc(x)
60261 +#endif
60262 +
60263 /* Free memory returned from module_alloc. */
60264 void module_free(struct module *mod, void *module_region);
60265
60266 +#ifdef CONFIG_PAX_KERNEXEC
60267 +void module_free_exec(struct module *mod, void *module_region);
60268 +#else
60269 +#define module_free_exec(x, y) module_free((x), (y))
60270 +#endif
60271 +
60272 /* Apply the given relocation to the (simplified) ELF. Return -error
60273 or 0. */
60274 int apply_relocate(Elf_Shdr *sechdrs,
60275 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60276 index 7939f63..ec6df57 100644
60277 --- a/include/linux/moduleparam.h
60278 +++ b/include/linux/moduleparam.h
60279 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60280 * @len is usually just sizeof(string).
60281 */
60282 #define module_param_string(name, string, len, perm) \
60283 - static const struct kparam_string __param_string_##name \
60284 + static const struct kparam_string __param_string_##name __used \
60285 = { len, string }; \
60286 __module_param_call(MODULE_PARAM_PREFIX, name, \
60287 &param_ops_string, \
60288 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60289 * module_param_named() for why this might be necessary.
60290 */
60291 #define module_param_array_named(name, array, type, nump, perm) \
60292 - static const struct kparam_array __param_arr_##name \
60293 + static const struct kparam_array __param_arr_##name __used \
60294 = { .max = ARRAY_SIZE(array), .num = nump, \
60295 .ops = &param_ops_##type, \
60296 .elemsize = sizeof(array[0]), .elem = array }; \
60297 diff --git a/include/linux/namei.h b/include/linux/namei.h
60298 index ffc0213..2c1f2cb 100644
60299 --- a/include/linux/namei.h
60300 +++ b/include/linux/namei.h
60301 @@ -24,7 +24,7 @@ struct nameidata {
60302 unsigned seq;
60303 int last_type;
60304 unsigned depth;
60305 - char *saved_names[MAX_NESTED_LINKS + 1];
60306 + const char *saved_names[MAX_NESTED_LINKS + 1];
60307
60308 /* Intent data */
60309 union {
60310 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60311 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60312 extern void unlock_rename(struct dentry *, struct dentry *);
60313
60314 -static inline void nd_set_link(struct nameidata *nd, char *path)
60315 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60316 {
60317 nd->saved_names[nd->depth] = path;
60318 }
60319
60320 -static inline char *nd_get_link(struct nameidata *nd)
60321 +static inline const char *nd_get_link(const struct nameidata *nd)
60322 {
60323 return nd->saved_names[nd->depth];
60324 }
60325 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60326 index a82ad4d..90d15b7 100644
60327 --- a/include/linux/netdevice.h
60328 +++ b/include/linux/netdevice.h
60329 @@ -949,6 +949,7 @@ struct net_device_ops {
60330 int (*ndo_set_features)(struct net_device *dev,
60331 u32 features);
60332 };
60333 +typedef struct net_device_ops __no_const net_device_ops_no_const;
60334
60335 /*
60336 * The DEVICE structure.
60337 @@ -1088,7 +1089,7 @@ struct net_device {
60338 int iflink;
60339
60340 struct net_device_stats stats;
60341 - atomic_long_t rx_dropped; /* dropped packets by core network
60342 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60343 * Do not use this in drivers.
60344 */
60345
60346 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60347 new file mode 100644
60348 index 0000000..33f4af8
60349 --- /dev/null
60350 +++ b/include/linux/netfilter/xt_gradm.h
60351 @@ -0,0 +1,9 @@
60352 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60353 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60354 +
60355 +struct xt_gradm_mtinfo {
60356 + __u16 flags;
60357 + __u16 invflags;
60358 +};
60359 +
60360 +#endif
60361 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60362 index c65a18a..0c05f3a 100644
60363 --- a/include/linux/of_pdt.h
60364 +++ b/include/linux/of_pdt.h
60365 @@ -32,7 +32,7 @@ struct of_pdt_ops {
60366
60367 /* return 0 on success; fill in 'len' with number of bytes in path */
60368 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60369 -};
60370 +} __no_const;
60371
60372 extern void *prom_early_alloc(unsigned long size);
60373
60374 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60375 index a4c5624..79d6d88 100644
60376 --- a/include/linux/oprofile.h
60377 +++ b/include/linux/oprofile.h
60378 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60379 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60380 char const * name, ulong * val);
60381
60382 -/** Create a file for read-only access to an atomic_t. */
60383 +/** Create a file for read-only access to an atomic_unchecked_t. */
60384 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60385 - char const * name, atomic_t * val);
60386 + char const * name, atomic_unchecked_t * val);
60387
60388 /** create a directory */
60389 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60390 diff --git a/include/linux/padata.h b/include/linux/padata.h
60391 index 4633b2f..988bc08 100644
60392 --- a/include/linux/padata.h
60393 +++ b/include/linux/padata.h
60394 @@ -129,7 +129,7 @@ struct parallel_data {
60395 struct padata_instance *pinst;
60396 struct padata_parallel_queue __percpu *pqueue;
60397 struct padata_serial_queue __percpu *squeue;
60398 - atomic_t seq_nr;
60399 + atomic_unchecked_t seq_nr;
60400 atomic_t reorder_objects;
60401 atomic_t refcnt;
60402 unsigned int max_seq_nr;
60403 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60404 index b1f8912..c955bff 100644
60405 --- a/include/linux/perf_event.h
60406 +++ b/include/linux/perf_event.h
60407 @@ -748,8 +748,8 @@ struct perf_event {
60408
60409 enum perf_event_active_state state;
60410 unsigned int attach_state;
60411 - local64_t count;
60412 - atomic64_t child_count;
60413 + local64_t count; /* PaX: fix it one day */
60414 + atomic64_unchecked_t child_count;
60415
60416 /*
60417 * These are the total time in nanoseconds that the event
60418 @@ -800,8 +800,8 @@ struct perf_event {
60419 * These accumulate total time (in nanoseconds) that children
60420 * events have been enabled and running, respectively.
60421 */
60422 - atomic64_t child_total_time_enabled;
60423 - atomic64_t child_total_time_running;
60424 + atomic64_unchecked_t child_total_time_enabled;
60425 + atomic64_unchecked_t child_total_time_running;
60426
60427 /*
60428 * Protect attach/detach and child_list:
60429 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60430 index 77257c9..51d473a 100644
60431 --- a/include/linux/pipe_fs_i.h
60432 +++ b/include/linux/pipe_fs_i.h
60433 @@ -46,9 +46,9 @@ struct pipe_buffer {
60434 struct pipe_inode_info {
60435 wait_queue_head_t wait;
60436 unsigned int nrbufs, curbuf, buffers;
60437 - unsigned int readers;
60438 - unsigned int writers;
60439 - unsigned int waiting_writers;
60440 + atomic_t readers;
60441 + atomic_t writers;
60442 + atomic_t waiting_writers;
60443 unsigned int r_counter;
60444 unsigned int w_counter;
60445 struct page *tmp_page;
60446 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60447 index d3085e7..fd01052 100644
60448 --- a/include/linux/pm_runtime.h
60449 +++ b/include/linux/pm_runtime.h
60450 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60451
60452 static inline void pm_runtime_mark_last_busy(struct device *dev)
60453 {
60454 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60455 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60456 }
60457
60458 #else /* !CONFIG_PM_RUNTIME */
60459 diff --git a/include/linux/poison.h b/include/linux/poison.h
60460 index 79159de..f1233a9 100644
60461 --- a/include/linux/poison.h
60462 +++ b/include/linux/poison.h
60463 @@ -19,8 +19,8 @@
60464 * under normal circumstances, used to verify that nobody uses
60465 * non-initialized list entries.
60466 */
60467 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60468 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60469 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60470 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60471
60472 /********** include/linux/timer.h **********/
60473 /*
60474 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60475 index 58969b2..ead129b 100644
60476 --- a/include/linux/preempt.h
60477 +++ b/include/linux/preempt.h
60478 @@ -123,7 +123,7 @@ struct preempt_ops {
60479 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60480 void (*sched_out)(struct preempt_notifier *notifier,
60481 struct task_struct *next);
60482 -};
60483 +} __no_const;
60484
60485 /**
60486 * preempt_notifier - key for installing preemption notifiers
60487 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60488 index 643b96c..ef55a9c 100644
60489 --- a/include/linux/proc_fs.h
60490 +++ b/include/linux/proc_fs.h
60491 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60492 return proc_create_data(name, mode, parent, proc_fops, NULL);
60493 }
60494
60495 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60496 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60497 +{
60498 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60499 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60500 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60501 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60502 +#else
60503 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60504 +#endif
60505 +}
60506 +
60507 +
60508 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60509 mode_t mode, struct proc_dir_entry *base,
60510 read_proc_t *read_proc, void * data)
60511 @@ -258,7 +271,7 @@ union proc_op {
60512 int (*proc_show)(struct seq_file *m,
60513 struct pid_namespace *ns, struct pid *pid,
60514 struct task_struct *task);
60515 -};
60516 +} __no_const;
60517
60518 struct ctl_table_header;
60519 struct ctl_table;
60520 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60521 index 800f113..e9ee2e3 100644
60522 --- a/include/linux/ptrace.h
60523 +++ b/include/linux/ptrace.h
60524 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60525 extern void exit_ptrace(struct task_struct *tracer);
60526 #define PTRACE_MODE_READ 1
60527 #define PTRACE_MODE_ATTACH 2
60528 -/* Returns 0 on success, -errno on denial. */
60529 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60530 /* Returns true on success, false on denial. */
60531 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60532 +/* Returns true on success, false on denial. */
60533 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60534 +/* Returns true on success, false on denial. */
60535 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60536
60537 static inline int ptrace_reparented(struct task_struct *child)
60538 {
60539 diff --git a/include/linux/random.h b/include/linux/random.h
60540 index 8f74538..02a1012 100644
60541 --- a/include/linux/random.h
60542 +++ b/include/linux/random.h
60543 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60544
60545 u32 prandom32(struct rnd_state *);
60546
60547 +static inline unsigned long pax_get_random_long(void)
60548 +{
60549 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60550 +}
60551 +
60552 /*
60553 * Handle minimum values for seeds
60554 */
60555 static inline u32 __seed(u32 x, u32 m)
60556 {
60557 - return (x < m) ? x + m : x;
60558 + return (x <= m) ? x + m + 1 : x;
60559 }
60560
60561 /**
60562 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60563 index e0879a7..a12f962 100644
60564 --- a/include/linux/reboot.h
60565 +++ b/include/linux/reboot.h
60566 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60567 * Architecture-specific implementations of sys_reboot commands.
60568 */
60569
60570 -extern void machine_restart(char *cmd);
60571 -extern void machine_halt(void);
60572 -extern void machine_power_off(void);
60573 +extern void machine_restart(char *cmd) __noreturn;
60574 +extern void machine_halt(void) __noreturn;
60575 +extern void machine_power_off(void) __noreturn;
60576
60577 extern void machine_shutdown(void);
60578 struct pt_regs;
60579 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60580 */
60581
60582 extern void kernel_restart_prepare(char *cmd);
60583 -extern void kernel_restart(char *cmd);
60584 -extern void kernel_halt(void);
60585 -extern void kernel_power_off(void);
60586 +extern void kernel_restart(char *cmd) __noreturn;
60587 +extern void kernel_halt(void) __noreturn;
60588 +extern void kernel_power_off(void) __noreturn;
60589
60590 extern int C_A_D; /* for sysctl */
60591 void ctrl_alt_del(void);
60592 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60593 * Emergency restart, callable from an interrupt handler.
60594 */
60595
60596 -extern void emergency_restart(void);
60597 +extern void emergency_restart(void) __noreturn;
60598 #include <asm/emergency-restart.h>
60599
60600 #endif
60601 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60602 index 96d465f..b084e05 100644
60603 --- a/include/linux/reiserfs_fs.h
60604 +++ b/include/linux/reiserfs_fs.h
60605 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60606 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60607
60608 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60609 -#define get_generation(s) atomic_read (&fs_generation(s))
60610 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60611 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60612 #define __fs_changed(gen,s) (gen != get_generation (s))
60613 #define fs_changed(gen,s) \
60614 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60615 index 52c83b6..18ed7eb 100644
60616 --- a/include/linux/reiserfs_fs_sb.h
60617 +++ b/include/linux/reiserfs_fs_sb.h
60618 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60619 /* Comment? -Hans */
60620 wait_queue_head_t s_wait;
60621 /* To be obsoleted soon by per buffer seals.. -Hans */
60622 - atomic_t s_generation_counter; // increased by one every time the
60623 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60624 // tree gets re-balanced
60625 unsigned long s_properties; /* File system properties. Currently holds
60626 on-disk FS format */
60627 diff --git a/include/linux/relay.h b/include/linux/relay.h
60628 index 14a86bc..17d0700 100644
60629 --- a/include/linux/relay.h
60630 +++ b/include/linux/relay.h
60631 @@ -159,7 +159,7 @@ struct rchan_callbacks
60632 * The callback should return 0 if successful, negative if not.
60633 */
60634 int (*remove_buf_file)(struct dentry *dentry);
60635 -};
60636 +} __no_const;
60637
60638 /*
60639 * CONFIG_RELAY kernel API, kernel/relay.c
60640 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60641 index c6c6084..5bf1212 100644
60642 --- a/include/linux/rfkill.h
60643 +++ b/include/linux/rfkill.h
60644 @@ -147,6 +147,7 @@ struct rfkill_ops {
60645 void (*query)(struct rfkill *rfkill, void *data);
60646 int (*set_block)(void *data, bool blocked);
60647 };
60648 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60649
60650 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60651 /**
60652 diff --git a/include/linux/rio.h b/include/linux/rio.h
60653 index 4d50611..c6858a2 100644
60654 --- a/include/linux/rio.h
60655 +++ b/include/linux/rio.h
60656 @@ -315,7 +315,7 @@ struct rio_ops {
60657 int mbox, void *buffer, size_t len);
60658 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60659 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60660 -};
60661 +} __no_const;
60662
60663 #define RIO_RESOURCE_MEM 0x00000100
60664 #define RIO_RESOURCE_DOORBELL 0x00000200
60665 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60666 index 2148b12..519b820 100644
60667 --- a/include/linux/rmap.h
60668 +++ b/include/linux/rmap.h
60669 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60670 void anon_vma_init(void); /* create anon_vma_cachep */
60671 int anon_vma_prepare(struct vm_area_struct *);
60672 void unlink_anon_vmas(struct vm_area_struct *);
60673 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60674 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60675 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60676 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60677 void __anon_vma_link(struct vm_area_struct *);
60678
60679 static inline void anon_vma_merge(struct vm_area_struct *vma,
60680 diff --git a/include/linux/sched.h b/include/linux/sched.h
60681 index 1c4f3e9..b4e4851 100644
60682 --- a/include/linux/sched.h
60683 +++ b/include/linux/sched.h
60684 @@ -101,6 +101,7 @@ struct bio_list;
60685 struct fs_struct;
60686 struct perf_event_context;
60687 struct blk_plug;
60688 +struct linux_binprm;
60689
60690 /*
60691 * List of flags we want to share for kernel threads,
60692 @@ -380,10 +381,13 @@ struct user_namespace;
60693 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60694
60695 extern int sysctl_max_map_count;
60696 +extern unsigned long sysctl_heap_stack_gap;
60697
60698 #include <linux/aio.h>
60699
60700 #ifdef CONFIG_MMU
60701 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60702 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60703 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60704 extern unsigned long
60705 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60706 @@ -629,6 +633,17 @@ struct signal_struct {
60707 #ifdef CONFIG_TASKSTATS
60708 struct taskstats *stats;
60709 #endif
60710 +
60711 +#ifdef CONFIG_GRKERNSEC
60712 + u32 curr_ip;
60713 + u32 saved_ip;
60714 + u32 gr_saddr;
60715 + u32 gr_daddr;
60716 + u16 gr_sport;
60717 + u16 gr_dport;
60718 + u8 used_accept:1;
60719 +#endif
60720 +
60721 #ifdef CONFIG_AUDIT
60722 unsigned audit_tty;
60723 struct tty_audit_buf *tty_audit_buf;
60724 @@ -710,6 +725,11 @@ struct user_struct {
60725 struct key *session_keyring; /* UID's default session keyring */
60726 #endif
60727
60728 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60729 + unsigned int banned;
60730 + unsigned long ban_expires;
60731 +#endif
60732 +
60733 /* Hash table maintenance information */
60734 struct hlist_node uidhash_node;
60735 uid_t uid;
60736 @@ -1337,8 +1357,8 @@ struct task_struct {
60737 struct list_head thread_group;
60738
60739 struct completion *vfork_done; /* for vfork() */
60740 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60741 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60742 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60743 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60744
60745 cputime_t utime, stime, utimescaled, stimescaled;
60746 cputime_t gtime;
60747 @@ -1354,13 +1374,6 @@ struct task_struct {
60748 struct task_cputime cputime_expires;
60749 struct list_head cpu_timers[3];
60750
60751 -/* process credentials */
60752 - const struct cred __rcu *real_cred; /* objective and real subjective task
60753 - * credentials (COW) */
60754 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60755 - * credentials (COW) */
60756 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60757 -
60758 char comm[TASK_COMM_LEN]; /* executable name excluding path
60759 - access with [gs]et_task_comm (which lock
60760 it with task_lock())
60761 @@ -1377,8 +1390,16 @@ struct task_struct {
60762 #endif
60763 /* CPU-specific state of this task */
60764 struct thread_struct thread;
60765 +/* thread_info moved to task_struct */
60766 +#ifdef CONFIG_X86
60767 + struct thread_info tinfo;
60768 +#endif
60769 /* filesystem information */
60770 struct fs_struct *fs;
60771 +
60772 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60773 + * credentials (COW) */
60774 +
60775 /* open file information */
60776 struct files_struct *files;
60777 /* namespaces */
60778 @@ -1425,6 +1446,11 @@ struct task_struct {
60779 struct rt_mutex_waiter *pi_blocked_on;
60780 #endif
60781
60782 +/* process credentials */
60783 + const struct cred __rcu *real_cred; /* objective and real subjective task
60784 + * credentials (COW) */
60785 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60786 +
60787 #ifdef CONFIG_DEBUG_MUTEXES
60788 /* mutex deadlock detection */
60789 struct mutex_waiter *blocked_on;
60790 @@ -1540,6 +1566,27 @@ struct task_struct {
60791 unsigned long default_timer_slack_ns;
60792
60793 struct list_head *scm_work_list;
60794 +
60795 +#ifdef CONFIG_GRKERNSEC
60796 + /* grsecurity */
60797 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60798 + u64 exec_id;
60799 +#endif
60800 +#ifdef CONFIG_GRKERNSEC_SETXID
60801 + const struct cred *delayed_cred;
60802 +#endif
60803 + struct dentry *gr_chroot_dentry;
60804 + struct acl_subject_label *acl;
60805 + struct acl_role_label *role;
60806 + struct file *exec_file;
60807 + u16 acl_role_id;
60808 + /* is this the task that authenticated to the special role */
60809 + u8 acl_sp_role;
60810 + u8 is_writable;
60811 + u8 brute;
60812 + u8 gr_is_chrooted;
60813 +#endif
60814 +
60815 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60816 /* Index of current stored address in ret_stack */
60817 int curr_ret_stack;
60818 @@ -1574,6 +1621,51 @@ struct task_struct {
60819 #endif
60820 };
60821
60822 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60823 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60824 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60825 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60826 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60827 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60828 +
60829 +#ifdef CONFIG_PAX_SOFTMODE
60830 +extern int pax_softmode;
60831 +#endif
60832 +
60833 +extern int pax_check_flags(unsigned long *);
60834 +
60835 +/* if tsk != current then task_lock must be held on it */
60836 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60837 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60838 +{
60839 + if (likely(tsk->mm))
60840 + return tsk->mm->pax_flags;
60841 + else
60842 + return 0UL;
60843 +}
60844 +
60845 +/* if tsk != current then task_lock must be held on it */
60846 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60847 +{
60848 + if (likely(tsk->mm)) {
60849 + tsk->mm->pax_flags = flags;
60850 + return 0;
60851 + }
60852 + return -EINVAL;
60853 +}
60854 +#endif
60855 +
60856 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60857 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60858 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60859 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60860 +#endif
60861 +
60862 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60863 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60864 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60865 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60866 +
60867 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60868 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60869
60870 @@ -2081,7 +2173,9 @@ void yield(void);
60871 extern struct exec_domain default_exec_domain;
60872
60873 union thread_union {
60874 +#ifndef CONFIG_X86
60875 struct thread_info thread_info;
60876 +#endif
60877 unsigned long stack[THREAD_SIZE/sizeof(long)];
60878 };
60879
60880 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60881 */
60882
60883 extern struct task_struct *find_task_by_vpid(pid_t nr);
60884 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60885 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60886 struct pid_namespace *ns);
60887
60888 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60889 extern void mmput(struct mm_struct *);
60890 /* Grab a reference to a task's mm, if it is not already going away */
60891 extern struct mm_struct *get_task_mm(struct task_struct *task);
60892 +/*
60893 + * Grab a reference to a task's mm, if it is not already going away
60894 + * and ptrace_may_access with the mode parameter passed to it
60895 + * succeeds.
60896 + */
60897 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60898 /* Remove the current tasks stale references to the old mm_struct */
60899 extern void mm_release(struct task_struct *, struct mm_struct *);
60900 /* Allocate a new mm structure and copy contents from tsk->mm */
60901 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60902 extern void exit_itimers(struct signal_struct *);
60903 extern void flush_itimer_signals(void);
60904
60905 -extern NORET_TYPE void do_group_exit(int);
60906 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60907
60908 extern void daemonize(const char *, ...);
60909 extern int allow_signal(int);
60910 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60911
60912 #endif
60913
60914 -static inline int object_is_on_stack(void *obj)
60915 +static inline int object_starts_on_stack(void *obj)
60916 {
60917 - void *stack = task_stack_page(current);
60918 + const void *stack = task_stack_page(current);
60919
60920 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60921 }
60922
60923 +#ifdef CONFIG_PAX_USERCOPY
60924 +extern int object_is_on_stack(const void *obj, unsigned long len);
60925 +#endif
60926 +
60927 extern void thread_info_cache_init(void);
60928
60929 #ifdef CONFIG_DEBUG_STACK_USAGE
60930 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60931 index 899fbb4..1cb4138 100644
60932 --- a/include/linux/screen_info.h
60933 +++ b/include/linux/screen_info.h
60934 @@ -43,7 +43,8 @@ struct screen_info {
60935 __u16 pages; /* 0x32 */
60936 __u16 vesa_attributes; /* 0x34 */
60937 __u32 capabilities; /* 0x36 */
60938 - __u8 _reserved[6]; /* 0x3a */
60939 + __u16 vesapm_size; /* 0x3a */
60940 + __u8 _reserved[4]; /* 0x3c */
60941 } __attribute__((packed));
60942
60943 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60944 diff --git a/include/linux/security.h b/include/linux/security.h
60945 index e8c619d..e0cbd1c 100644
60946 --- a/include/linux/security.h
60947 +++ b/include/linux/security.h
60948 @@ -37,6 +37,7 @@
60949 #include <linux/xfrm.h>
60950 #include <linux/slab.h>
60951 #include <linux/xattr.h>
60952 +#include <linux/grsecurity.h>
60953 #include <net/flow.h>
60954
60955 /* Maximum number of letters for an LSM name string */
60956 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60957 index 0b69a46..b2ffa4c 100644
60958 --- a/include/linux/seq_file.h
60959 +++ b/include/linux/seq_file.h
60960 @@ -24,6 +24,9 @@ struct seq_file {
60961 struct mutex lock;
60962 const struct seq_operations *op;
60963 int poll_event;
60964 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60965 + u64 exec_id;
60966 +#endif
60967 void *private;
60968 };
60969
60970 @@ -33,6 +36,7 @@ struct seq_operations {
60971 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60972 int (*show) (struct seq_file *m, void *v);
60973 };
60974 +typedef struct seq_operations __no_const seq_operations_no_const;
60975
60976 #define SEQ_SKIP 1
60977
60978 diff --git a/include/linux/shm.h b/include/linux/shm.h
60979 index 92808b8..c28cac4 100644
60980 --- a/include/linux/shm.h
60981 +++ b/include/linux/shm.h
60982 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60983
60984 /* The task created the shm object. NULL if the task is dead. */
60985 struct task_struct *shm_creator;
60986 +#ifdef CONFIG_GRKERNSEC
60987 + time_t shm_createtime;
60988 + pid_t shm_lapid;
60989 +#endif
60990 };
60991
60992 /* shm_mode upper byte flags */
60993 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60994 index fe86488..1563c1c 100644
60995 --- a/include/linux/skbuff.h
60996 +++ b/include/linux/skbuff.h
60997 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60998 */
60999 static inline int skb_queue_empty(const struct sk_buff_head *list)
61000 {
61001 - return list->next == (struct sk_buff *)list;
61002 + return list->next == (const struct sk_buff *)list;
61003 }
61004
61005 /**
61006 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
61007 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61008 const struct sk_buff *skb)
61009 {
61010 - return skb->next == (struct sk_buff *)list;
61011 + return skb->next == (const struct sk_buff *)list;
61012 }
61013
61014 /**
61015 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61016 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61017 const struct sk_buff *skb)
61018 {
61019 - return skb->prev == (struct sk_buff *)list;
61020 + return skb->prev == (const struct sk_buff *)list;
61021 }
61022
61023 /**
61024 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
61025 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
61026 */
61027 #ifndef NET_SKB_PAD
61028 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
61029 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
61030 #endif
61031
61032 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
61033 diff --git a/include/linux/slab.h b/include/linux/slab.h
61034 index 573c809..e84c132 100644
61035 --- a/include/linux/slab.h
61036 +++ b/include/linux/slab.h
61037 @@ -11,12 +11,20 @@
61038
61039 #include <linux/gfp.h>
61040 #include <linux/types.h>
61041 +#include <linux/err.h>
61042
61043 /*
61044 * Flags to pass to kmem_cache_create().
61045 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61046 */
61047 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61048 +
61049 +#ifdef CONFIG_PAX_USERCOPY
61050 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61051 +#else
61052 +#define SLAB_USERCOPY 0x00000000UL
61053 +#endif
61054 +
61055 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61056 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61057 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61058 @@ -87,10 +95,13 @@
61059 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61060 * Both make kfree a no-op.
61061 */
61062 -#define ZERO_SIZE_PTR ((void *)16)
61063 +#define ZERO_SIZE_PTR \
61064 +({ \
61065 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61066 + (void *)(-MAX_ERRNO-1L); \
61067 +})
61068
61069 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61070 - (unsigned long)ZERO_SIZE_PTR)
61071 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61072
61073 /*
61074 * struct kmem_cache related prototypes
61075 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
61076 void kfree(const void *);
61077 void kzfree(const void *);
61078 size_t ksize(const void *);
61079 +void check_object_size(const void *ptr, unsigned long n, bool to);
61080
61081 /*
61082 * Allocator specific definitions. These are mainly used to establish optimized
61083 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
61084
61085 void __init kmem_cache_init_late(void);
61086
61087 +#define kmalloc(x, y) \
61088 +({ \
61089 + void *___retval; \
61090 + intoverflow_t ___x = (intoverflow_t)x; \
61091 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
61092 + ___retval = NULL; \
61093 + else \
61094 + ___retval = kmalloc((size_t)___x, (y)); \
61095 + ___retval; \
61096 +})
61097 +
61098 +#define kmalloc_node(x, y, z) \
61099 +({ \
61100 + void *___retval; \
61101 + intoverflow_t ___x = (intoverflow_t)x; \
61102 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61103 + ___retval = NULL; \
61104 + else \
61105 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
61106 + ___retval; \
61107 +})
61108 +
61109 +#define kzalloc(x, y) \
61110 +({ \
61111 + void *___retval; \
61112 + intoverflow_t ___x = (intoverflow_t)x; \
61113 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
61114 + ___retval = NULL; \
61115 + else \
61116 + ___retval = kzalloc((size_t)___x, (y)); \
61117 + ___retval; \
61118 +})
61119 +
61120 +#define __krealloc(x, y, z) \
61121 +({ \
61122 + void *___retval; \
61123 + intoverflow_t ___y = (intoverflow_t)y; \
61124 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
61125 + ___retval = NULL; \
61126 + else \
61127 + ___retval = __krealloc((x), (size_t)___y, (z)); \
61128 + ___retval; \
61129 +})
61130 +
61131 +#define krealloc(x, y, z) \
61132 +({ \
61133 + void *___retval; \
61134 + intoverflow_t ___y = (intoverflow_t)y; \
61135 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
61136 + ___retval = NULL; \
61137 + else \
61138 + ___retval = krealloc((x), (size_t)___y, (z)); \
61139 + ___retval; \
61140 +})
61141 +
61142 #endif /* _LINUX_SLAB_H */
61143 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
61144 index d00e0ba..1b3bf7b 100644
61145 --- a/include/linux/slab_def.h
61146 +++ b/include/linux/slab_def.h
61147 @@ -68,10 +68,10 @@ struct kmem_cache {
61148 unsigned long node_allocs;
61149 unsigned long node_frees;
61150 unsigned long node_overflow;
61151 - atomic_t allochit;
61152 - atomic_t allocmiss;
61153 - atomic_t freehit;
61154 - atomic_t freemiss;
61155 + atomic_unchecked_t allochit;
61156 + atomic_unchecked_t allocmiss;
61157 + atomic_unchecked_t freehit;
61158 + atomic_unchecked_t freemiss;
61159
61160 /*
61161 * If debugging is enabled, then the allocator can add additional
61162 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
61163 index a32bcfd..53b71f4 100644
61164 --- a/include/linux/slub_def.h
61165 +++ b/include/linux/slub_def.h
61166 @@ -89,7 +89,7 @@ struct kmem_cache {
61167 struct kmem_cache_order_objects max;
61168 struct kmem_cache_order_objects min;
61169 gfp_t allocflags; /* gfp flags to use on each alloc */
61170 - int refcount; /* Refcount for slab cache destroy */
61171 + atomic_t refcount; /* Refcount for slab cache destroy */
61172 void (*ctor)(void *);
61173 int inuse; /* Offset to metadata */
61174 int align; /* Alignment */
61175 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
61176 }
61177
61178 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61179 -void *__kmalloc(size_t size, gfp_t flags);
61180 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61181
61182 static __always_inline void *
61183 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
61184 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
61185 index de8832d..0147b46 100644
61186 --- a/include/linux/sonet.h
61187 +++ b/include/linux/sonet.h
61188 @@ -61,7 +61,7 @@ struct sonet_stats {
61189 #include <linux/atomic.h>
61190
61191 struct k_sonet_stats {
61192 -#define __HANDLE_ITEM(i) atomic_t i
61193 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61194 __SONET_ITEMS
61195 #undef __HANDLE_ITEM
61196 };
61197 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
61198 index 3d8f9c4..69f1c0a 100644
61199 --- a/include/linux/sunrpc/clnt.h
61200 +++ b/include/linux/sunrpc/clnt.h
61201 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
61202 {
61203 switch (sap->sa_family) {
61204 case AF_INET:
61205 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
61206 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61207 case AF_INET6:
61208 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61209 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61210 }
61211 return 0;
61212 }
61213 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
61214 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61215 const struct sockaddr *src)
61216 {
61217 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61218 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61219 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61220
61221 dsin->sin_family = ssin->sin_family;
61222 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
61223 if (sa->sa_family != AF_INET6)
61224 return 0;
61225
61226 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61227 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61228 }
61229
61230 #endif /* __KERNEL__ */
61231 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
61232 index e775689..9e206d9 100644
61233 --- a/include/linux/sunrpc/sched.h
61234 +++ b/include/linux/sunrpc/sched.h
61235 @@ -105,6 +105,7 @@ struct rpc_call_ops {
61236 void (*rpc_call_done)(struct rpc_task *, void *);
61237 void (*rpc_release)(void *);
61238 };
61239 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61240
61241 struct rpc_task_setup {
61242 struct rpc_task *task;
61243 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61244 index c14fe86..393245e 100644
61245 --- a/include/linux/sunrpc/svc_rdma.h
61246 +++ b/include/linux/sunrpc/svc_rdma.h
61247 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61248 extern unsigned int svcrdma_max_requests;
61249 extern unsigned int svcrdma_max_req_size;
61250
61251 -extern atomic_t rdma_stat_recv;
61252 -extern atomic_t rdma_stat_read;
61253 -extern atomic_t rdma_stat_write;
61254 -extern atomic_t rdma_stat_sq_starve;
61255 -extern atomic_t rdma_stat_rq_starve;
61256 -extern atomic_t rdma_stat_rq_poll;
61257 -extern atomic_t rdma_stat_rq_prod;
61258 -extern atomic_t rdma_stat_sq_poll;
61259 -extern atomic_t rdma_stat_sq_prod;
61260 +extern atomic_unchecked_t rdma_stat_recv;
61261 +extern atomic_unchecked_t rdma_stat_read;
61262 +extern atomic_unchecked_t rdma_stat_write;
61263 +extern atomic_unchecked_t rdma_stat_sq_starve;
61264 +extern atomic_unchecked_t rdma_stat_rq_starve;
61265 +extern atomic_unchecked_t rdma_stat_rq_poll;
61266 +extern atomic_unchecked_t rdma_stat_rq_prod;
61267 +extern atomic_unchecked_t rdma_stat_sq_poll;
61268 +extern atomic_unchecked_t rdma_stat_sq_prod;
61269
61270 #define RPCRDMA_VERSION 1
61271
61272 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61273 index 703cfa3..0b8ca72ac 100644
61274 --- a/include/linux/sysctl.h
61275 +++ b/include/linux/sysctl.h
61276 @@ -155,7 +155,11 @@ enum
61277 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61278 };
61279
61280 -
61281 +#ifdef CONFIG_PAX_SOFTMODE
61282 +enum {
61283 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61284 +};
61285 +#endif
61286
61287 /* CTL_VM names: */
61288 enum
61289 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61290
61291 extern int proc_dostring(struct ctl_table *, int,
61292 void __user *, size_t *, loff_t *);
61293 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61294 + void __user *, size_t *, loff_t *);
61295 extern int proc_dointvec(struct ctl_table *, int,
61296 void __user *, size_t *, loff_t *);
61297 extern int proc_dointvec_minmax(struct ctl_table *, int,
61298 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61299 index a71a292..51bd91d 100644
61300 --- a/include/linux/tracehook.h
61301 +++ b/include/linux/tracehook.h
61302 @@ -54,12 +54,12 @@ struct linux_binprm;
61303 /*
61304 * ptrace report for syscall entry and exit looks identical.
61305 */
61306 -static inline void ptrace_report_syscall(struct pt_regs *regs)
61307 +static inline int ptrace_report_syscall(struct pt_regs *regs)
61308 {
61309 int ptrace = current->ptrace;
61310
61311 if (!(ptrace & PT_PTRACED))
61312 - return;
61313 + return 0;
61314
61315 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61316
61317 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61318 send_sig(current->exit_code, current, 1);
61319 current->exit_code = 0;
61320 }
61321 +
61322 + return fatal_signal_pending(current);
61323 }
61324
61325 /**
61326 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61327 static inline __must_check int tracehook_report_syscall_entry(
61328 struct pt_regs *regs)
61329 {
61330 - ptrace_report_syscall(regs);
61331 - return 0;
61332 + return ptrace_report_syscall(regs);
61333 }
61334
61335 /**
61336 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61337 index ff7dc08..893e1bd 100644
61338 --- a/include/linux/tty_ldisc.h
61339 +++ b/include/linux/tty_ldisc.h
61340 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61341
61342 struct module *owner;
61343
61344 - int refcount;
61345 + atomic_t refcount;
61346 };
61347
61348 struct tty_ldisc {
61349 diff --git a/include/linux/types.h b/include/linux/types.h
61350 index 57a9723..dbe234a 100644
61351 --- a/include/linux/types.h
61352 +++ b/include/linux/types.h
61353 @@ -213,10 +213,26 @@ typedef struct {
61354 int counter;
61355 } atomic_t;
61356
61357 +#ifdef CONFIG_PAX_REFCOUNT
61358 +typedef struct {
61359 + int counter;
61360 +} atomic_unchecked_t;
61361 +#else
61362 +typedef atomic_t atomic_unchecked_t;
61363 +#endif
61364 +
61365 #ifdef CONFIG_64BIT
61366 typedef struct {
61367 long counter;
61368 } atomic64_t;
61369 +
61370 +#ifdef CONFIG_PAX_REFCOUNT
61371 +typedef struct {
61372 + long counter;
61373 +} atomic64_unchecked_t;
61374 +#else
61375 +typedef atomic64_t atomic64_unchecked_t;
61376 +#endif
61377 #endif
61378
61379 struct list_head {
61380 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61381 index 5ca0951..ab496a5 100644
61382 --- a/include/linux/uaccess.h
61383 +++ b/include/linux/uaccess.h
61384 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61385 long ret; \
61386 mm_segment_t old_fs = get_fs(); \
61387 \
61388 - set_fs(KERNEL_DS); \
61389 pagefault_disable(); \
61390 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61391 - pagefault_enable(); \
61392 + set_fs(KERNEL_DS); \
61393 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61394 set_fs(old_fs); \
61395 + pagefault_enable(); \
61396 ret; \
61397 })
61398
61399 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61400 index 99c1b4d..bb94261 100644
61401 --- a/include/linux/unaligned/access_ok.h
61402 +++ b/include/linux/unaligned/access_ok.h
61403 @@ -6,32 +6,32 @@
61404
61405 static inline u16 get_unaligned_le16(const void *p)
61406 {
61407 - return le16_to_cpup((__le16 *)p);
61408 + return le16_to_cpup((const __le16 *)p);
61409 }
61410
61411 static inline u32 get_unaligned_le32(const void *p)
61412 {
61413 - return le32_to_cpup((__le32 *)p);
61414 + return le32_to_cpup((const __le32 *)p);
61415 }
61416
61417 static inline u64 get_unaligned_le64(const void *p)
61418 {
61419 - return le64_to_cpup((__le64 *)p);
61420 + return le64_to_cpup((const __le64 *)p);
61421 }
61422
61423 static inline u16 get_unaligned_be16(const void *p)
61424 {
61425 - return be16_to_cpup((__be16 *)p);
61426 + return be16_to_cpup((const __be16 *)p);
61427 }
61428
61429 static inline u32 get_unaligned_be32(const void *p)
61430 {
61431 - return be32_to_cpup((__be32 *)p);
61432 + return be32_to_cpup((const __be32 *)p);
61433 }
61434
61435 static inline u64 get_unaligned_be64(const void *p)
61436 {
61437 - return be64_to_cpup((__be64 *)p);
61438 + return be64_to_cpup((const __be64 *)p);
61439 }
61440
61441 static inline void put_unaligned_le16(u16 val, void *p)
61442 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61443 index e5a40c3..20ab0f6 100644
61444 --- a/include/linux/usb/renesas_usbhs.h
61445 +++ b/include/linux/usb/renesas_usbhs.h
61446 @@ -39,7 +39,7 @@ enum {
61447 */
61448 struct renesas_usbhs_driver_callback {
61449 int (*notify_hotplug)(struct platform_device *pdev);
61450 -};
61451 +} __no_const;
61452
61453 /*
61454 * callback functions for platform
61455 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61456 * VBUS control is needed for Host
61457 */
61458 int (*set_vbus)(struct platform_device *pdev, int enable);
61459 -};
61460 +} __no_const;
61461
61462 /*
61463 * parameters for renesas usbhs
61464 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61465 index 6f8fbcf..8259001 100644
61466 --- a/include/linux/vermagic.h
61467 +++ b/include/linux/vermagic.h
61468 @@ -25,9 +25,35 @@
61469 #define MODULE_ARCH_VERMAGIC ""
61470 #endif
61471
61472 +#ifdef CONFIG_PAX_REFCOUNT
61473 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61474 +#else
61475 +#define MODULE_PAX_REFCOUNT ""
61476 +#endif
61477 +
61478 +#ifdef CONSTIFY_PLUGIN
61479 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61480 +#else
61481 +#define MODULE_CONSTIFY_PLUGIN ""
61482 +#endif
61483 +
61484 +#ifdef STACKLEAK_PLUGIN
61485 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61486 +#else
61487 +#define MODULE_STACKLEAK_PLUGIN ""
61488 +#endif
61489 +
61490 +#ifdef CONFIG_GRKERNSEC
61491 +#define MODULE_GRSEC "GRSEC "
61492 +#else
61493 +#define MODULE_GRSEC ""
61494 +#endif
61495 +
61496 #define VERMAGIC_STRING \
61497 UTS_RELEASE " " \
61498 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61499 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61500 - MODULE_ARCH_VERMAGIC
61501 + MODULE_ARCH_VERMAGIC \
61502 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61503 + MODULE_GRSEC
61504
61505 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61506 index 4bde182..aec92c1 100644
61507 --- a/include/linux/vmalloc.h
61508 +++ b/include/linux/vmalloc.h
61509 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61510 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61511 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61512 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61513 +
61514 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61515 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61516 +#endif
61517 +
61518 /* bits [20..32] reserved for arch specific ioremap internals */
61519
61520 /*
61521 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61522 # endif
61523 #endif
61524
61525 +#define vmalloc(x) \
61526 +({ \
61527 + void *___retval; \
61528 + intoverflow_t ___x = (intoverflow_t)x; \
61529 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61530 + ___retval = NULL; \
61531 + else \
61532 + ___retval = vmalloc((unsigned long)___x); \
61533 + ___retval; \
61534 +})
61535 +
61536 +#define vzalloc(x) \
61537 +({ \
61538 + void *___retval; \
61539 + intoverflow_t ___x = (intoverflow_t)x; \
61540 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61541 + ___retval = NULL; \
61542 + else \
61543 + ___retval = vzalloc((unsigned long)___x); \
61544 + ___retval; \
61545 +})
61546 +
61547 +#define __vmalloc(x, y, z) \
61548 +({ \
61549 + void *___retval; \
61550 + intoverflow_t ___x = (intoverflow_t)x; \
61551 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61552 + ___retval = NULL; \
61553 + else \
61554 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61555 + ___retval; \
61556 +})
61557 +
61558 +#define vmalloc_user(x) \
61559 +({ \
61560 + void *___retval; \
61561 + intoverflow_t ___x = (intoverflow_t)x; \
61562 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61563 + ___retval = NULL; \
61564 + else \
61565 + ___retval = vmalloc_user((unsigned long)___x); \
61566 + ___retval; \
61567 +})
61568 +
61569 +#define vmalloc_exec(x) \
61570 +({ \
61571 + void *___retval; \
61572 + intoverflow_t ___x = (intoverflow_t)x; \
61573 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61574 + ___retval = NULL; \
61575 + else \
61576 + ___retval = vmalloc_exec((unsigned long)___x); \
61577 + ___retval; \
61578 +})
61579 +
61580 +#define vmalloc_node(x, y) \
61581 +({ \
61582 + void *___retval; \
61583 + intoverflow_t ___x = (intoverflow_t)x; \
61584 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61585 + ___retval = NULL; \
61586 + else \
61587 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61588 + ___retval; \
61589 +})
61590 +
61591 +#define vzalloc_node(x, y) \
61592 +({ \
61593 + void *___retval; \
61594 + intoverflow_t ___x = (intoverflow_t)x; \
61595 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61596 + ___retval = NULL; \
61597 + else \
61598 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61599 + ___retval; \
61600 +})
61601 +
61602 +#define vmalloc_32(x) \
61603 +({ \
61604 + void *___retval; \
61605 + intoverflow_t ___x = (intoverflow_t)x; \
61606 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61607 + ___retval = NULL; \
61608 + else \
61609 + ___retval = vmalloc_32((unsigned long)___x); \
61610 + ___retval; \
61611 +})
61612 +
61613 +#define vmalloc_32_user(x) \
61614 +({ \
61615 +void *___retval; \
61616 + intoverflow_t ___x = (intoverflow_t)x; \
61617 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61618 + ___retval = NULL; \
61619 + else \
61620 + ___retval = vmalloc_32_user((unsigned long)___x);\
61621 + ___retval; \
61622 +})
61623 +
61624 #endif /* _LINUX_VMALLOC_H */
61625 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61626 index 65efb92..137adbb 100644
61627 --- a/include/linux/vmstat.h
61628 +++ b/include/linux/vmstat.h
61629 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61630 /*
61631 * Zone based page accounting with per cpu differentials.
61632 */
61633 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61634 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61635
61636 static inline void zone_page_state_add(long x, struct zone *zone,
61637 enum zone_stat_item item)
61638 {
61639 - atomic_long_add(x, &zone->vm_stat[item]);
61640 - atomic_long_add(x, &vm_stat[item]);
61641 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61642 + atomic_long_add_unchecked(x, &vm_stat[item]);
61643 }
61644
61645 static inline unsigned long global_page_state(enum zone_stat_item item)
61646 {
61647 - long x = atomic_long_read(&vm_stat[item]);
61648 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61649 #ifdef CONFIG_SMP
61650 if (x < 0)
61651 x = 0;
61652 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61653 static inline unsigned long zone_page_state(struct zone *zone,
61654 enum zone_stat_item item)
61655 {
61656 - long x = atomic_long_read(&zone->vm_stat[item]);
61657 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61658 #ifdef CONFIG_SMP
61659 if (x < 0)
61660 x = 0;
61661 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61662 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61663 enum zone_stat_item item)
61664 {
61665 - long x = atomic_long_read(&zone->vm_stat[item]);
61666 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61667
61668 #ifdef CONFIG_SMP
61669 int cpu;
61670 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61671
61672 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61673 {
61674 - atomic_long_inc(&zone->vm_stat[item]);
61675 - atomic_long_inc(&vm_stat[item]);
61676 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61677 + atomic_long_inc_unchecked(&vm_stat[item]);
61678 }
61679
61680 static inline void __inc_zone_page_state(struct page *page,
61681 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61682
61683 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61684 {
61685 - atomic_long_dec(&zone->vm_stat[item]);
61686 - atomic_long_dec(&vm_stat[item]);
61687 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61688 + atomic_long_dec_unchecked(&vm_stat[item]);
61689 }
61690
61691 static inline void __dec_zone_page_state(struct page *page,
61692 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61693 index e5d1220..ef6e406 100644
61694 --- a/include/linux/xattr.h
61695 +++ b/include/linux/xattr.h
61696 @@ -57,6 +57,11 @@
61697 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61698 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61699
61700 +/* User namespace */
61701 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61702 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61703 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61704 +
61705 #ifdef __KERNEL__
61706
61707 #include <linux/types.h>
61708 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61709 index 4aeff96..b378cdc 100644
61710 --- a/include/media/saa7146_vv.h
61711 +++ b/include/media/saa7146_vv.h
61712 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61713 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61714
61715 /* the extension can override this */
61716 - struct v4l2_ioctl_ops ops;
61717 + v4l2_ioctl_ops_no_const ops;
61718 /* pointer to the saa7146 core ops */
61719 const struct v4l2_ioctl_ops *core_ops;
61720
61721 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61722 index c7c40f1..4f01585 100644
61723 --- a/include/media/v4l2-dev.h
61724 +++ b/include/media/v4l2-dev.h
61725 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61726
61727
61728 struct v4l2_file_operations {
61729 - struct module *owner;
61730 + struct module * const owner;
61731 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61732 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61733 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61734 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61735 int (*open) (struct file *);
61736 int (*release) (struct file *);
61737 };
61738 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61739
61740 /*
61741 * Newer version of video_device, handled by videodev2.c
61742 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61743 index 4d1c74a..65e1221 100644
61744 --- a/include/media/v4l2-ioctl.h
61745 +++ b/include/media/v4l2-ioctl.h
61746 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61747 long (*vidioc_default) (struct file *file, void *fh,
61748 bool valid_prio, int cmd, void *arg);
61749 };
61750 -
61751 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61752
61753 /* v4l debugging and diagnostics */
61754
61755 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61756 index 8d55251..dfe5b0a 100644
61757 --- a/include/net/caif/caif_hsi.h
61758 +++ b/include/net/caif/caif_hsi.h
61759 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61760 void (*rx_done_cb) (struct cfhsi_drv *drv);
61761 void (*wake_up_cb) (struct cfhsi_drv *drv);
61762 void (*wake_down_cb) (struct cfhsi_drv *drv);
61763 -};
61764 +} __no_const;
61765
61766 /* Structure implemented by HSI device. */
61767 struct cfhsi_dev {
61768 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61769 index 9e5425b..8136ffc 100644
61770 --- a/include/net/caif/cfctrl.h
61771 +++ b/include/net/caif/cfctrl.h
61772 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61773 void (*radioset_rsp)(void);
61774 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61775 struct cflayer *client_layer);
61776 -};
61777 +} __no_const;
61778
61779 /* Link Setup Parameters for CAIF-Links. */
61780 struct cfctrl_link_param {
61781 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61782 struct cfctrl {
61783 struct cfsrvl serv;
61784 struct cfctrl_rsp res;
61785 - atomic_t req_seq_no;
61786 - atomic_t rsp_seq_no;
61787 + atomic_unchecked_t req_seq_no;
61788 + atomic_unchecked_t rsp_seq_no;
61789 struct list_head list;
61790 /* Protects from simultaneous access to first_req list */
61791 spinlock_t info_list_lock;
61792 diff --git a/include/net/flow.h b/include/net/flow.h
61793 index 57f15a7..0de26c6 100644
61794 --- a/include/net/flow.h
61795 +++ b/include/net/flow.h
61796 @@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61797
61798 extern void flow_cache_flush(void);
61799 extern void flow_cache_flush_deferred(void);
61800 -extern atomic_t flow_cache_genid;
61801 +extern atomic_unchecked_t flow_cache_genid;
61802
61803 #endif
61804 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61805 index e9ff3fc..9d3e5c7 100644
61806 --- a/include/net/inetpeer.h
61807 +++ b/include/net/inetpeer.h
61808 @@ -48,8 +48,8 @@ struct inet_peer {
61809 */
61810 union {
61811 struct {
61812 - atomic_t rid; /* Frag reception counter */
61813 - atomic_t ip_id_count; /* IP ID for the next packet */
61814 + atomic_unchecked_t rid; /* Frag reception counter */
61815 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61816 __u32 tcp_ts;
61817 __u32 tcp_ts_stamp;
61818 };
61819 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61820 more++;
61821 inet_peer_refcheck(p);
61822 do {
61823 - old = atomic_read(&p->ip_id_count);
61824 + old = atomic_read_unchecked(&p->ip_id_count);
61825 new = old + more;
61826 if (!new)
61827 new = 1;
61828 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61829 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61830 return new;
61831 }
61832
61833 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61834 index 10422ef..662570f 100644
61835 --- a/include/net/ip_fib.h
61836 +++ b/include/net/ip_fib.h
61837 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61838
61839 #define FIB_RES_SADDR(net, res) \
61840 ((FIB_RES_NH(res).nh_saddr_genid == \
61841 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61842 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61843 FIB_RES_NH(res).nh_saddr : \
61844 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61845 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61846 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61847 index e5a7b9a..f4fc44b 100644
61848 --- a/include/net/ip_vs.h
61849 +++ b/include/net/ip_vs.h
61850 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61851 struct ip_vs_conn *control; /* Master control connection */
61852 atomic_t n_control; /* Number of controlled ones */
61853 struct ip_vs_dest *dest; /* real server */
61854 - atomic_t in_pkts; /* incoming packet counter */
61855 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61856
61857 /* packet transmitter for different forwarding methods. If it
61858 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61859 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61860 __be16 port; /* port number of the server */
61861 union nf_inet_addr addr; /* IP address of the server */
61862 volatile unsigned flags; /* dest status flags */
61863 - atomic_t conn_flags; /* flags to copy to conn */
61864 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61865 atomic_t weight; /* server weight */
61866
61867 atomic_t refcnt; /* reference counter */
61868 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61869 index 69b610a..fe3962c 100644
61870 --- a/include/net/irda/ircomm_core.h
61871 +++ b/include/net/irda/ircomm_core.h
61872 @@ -51,7 +51,7 @@ typedef struct {
61873 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61874 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61875 struct ircomm_info *);
61876 -} call_t;
61877 +} __no_const call_t;
61878
61879 struct ircomm_cb {
61880 irda_queue_t queue;
61881 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61882 index 59ba38bc..d515662 100644
61883 --- a/include/net/irda/ircomm_tty.h
61884 +++ b/include/net/irda/ircomm_tty.h
61885 @@ -35,6 +35,7 @@
61886 #include <linux/termios.h>
61887 #include <linux/timer.h>
61888 #include <linux/tty.h> /* struct tty_struct */
61889 +#include <asm/local.h>
61890
61891 #include <net/irda/irias_object.h>
61892 #include <net/irda/ircomm_core.h>
61893 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61894 unsigned short close_delay;
61895 unsigned short closing_wait; /* time to wait before closing */
61896
61897 - int open_count;
61898 - int blocked_open; /* # of blocked opens */
61899 + local_t open_count;
61900 + local_t blocked_open; /* # of blocked opens */
61901
61902 /* Protect concurent access to :
61903 * o self->open_count
61904 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61905 index f2419cf..473679f 100644
61906 --- a/include/net/iucv/af_iucv.h
61907 +++ b/include/net/iucv/af_iucv.h
61908 @@ -139,7 +139,7 @@ struct iucv_sock {
61909 struct iucv_sock_list {
61910 struct hlist_head head;
61911 rwlock_t lock;
61912 - atomic_t autobind_name;
61913 + atomic_unchecked_t autobind_name;
61914 };
61915
61916 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61917 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61918 index 2720884..3aa5c25 100644
61919 --- a/include/net/neighbour.h
61920 +++ b/include/net/neighbour.h
61921 @@ -122,7 +122,7 @@ struct neigh_ops {
61922 void (*error_report)(struct neighbour *, struct sk_buff *);
61923 int (*output)(struct neighbour *, struct sk_buff *);
61924 int (*connected_output)(struct neighbour *, struct sk_buff *);
61925 -};
61926 +} __do_const;
61927
61928 struct pneigh_entry {
61929 struct pneigh_entry *next;
61930 diff --git a/include/net/netlink.h b/include/net/netlink.h
61931 index cb1f350..3279d2c 100644
61932 --- a/include/net/netlink.h
61933 +++ b/include/net/netlink.h
61934 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61935 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61936 {
61937 if (mark)
61938 - skb_trim(skb, (unsigned char *) mark - skb->data);
61939 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61940 }
61941
61942 /**
61943 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61944 index d786b4f..4c3dd41 100644
61945 --- a/include/net/netns/ipv4.h
61946 +++ b/include/net/netns/ipv4.h
61947 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61948
61949 unsigned int sysctl_ping_group_range[2];
61950
61951 - atomic_t rt_genid;
61952 - atomic_t dev_addr_genid;
61953 + atomic_unchecked_t rt_genid;
61954 + atomic_unchecked_t dev_addr_genid;
61955
61956 #ifdef CONFIG_IP_MROUTE
61957 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61958 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61959 index 6a72a58..e6a127d 100644
61960 --- a/include/net/sctp/sctp.h
61961 +++ b/include/net/sctp/sctp.h
61962 @@ -318,9 +318,9 @@ do { \
61963
61964 #else /* SCTP_DEBUG */
61965
61966 -#define SCTP_DEBUG_PRINTK(whatever...)
61967 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61968 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61969 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61970 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61971 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61972 #define SCTP_ENABLE_DEBUG
61973 #define SCTP_DISABLE_DEBUG
61974 #define SCTP_ASSERT(expr, str, func)
61975 diff --git a/include/net/sock.h b/include/net/sock.h
61976 index 32e3937..87a1dbc 100644
61977 --- a/include/net/sock.h
61978 +++ b/include/net/sock.h
61979 @@ -277,7 +277,7 @@ struct sock {
61980 #ifdef CONFIG_RPS
61981 __u32 sk_rxhash;
61982 #endif
61983 - atomic_t sk_drops;
61984 + atomic_unchecked_t sk_drops;
61985 int sk_rcvbuf;
61986
61987 struct sk_filter __rcu *sk_filter;
61988 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61989 }
61990
61991 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61992 - char __user *from, char *to,
61993 + char __user *from, unsigned char *to,
61994 int copy, int offset)
61995 {
61996 if (skb->ip_summed == CHECKSUM_NONE) {
61997 diff --git a/include/net/tcp.h b/include/net/tcp.h
61998 index bb18c4d..bb87972 100644
61999 --- a/include/net/tcp.h
62000 +++ b/include/net/tcp.h
62001 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
62002 char *name;
62003 sa_family_t family;
62004 const struct file_operations *seq_fops;
62005 - struct seq_operations seq_ops;
62006 + seq_operations_no_const seq_ops;
62007 };
62008
62009 struct tcp_iter_state {
62010 diff --git a/include/net/udp.h b/include/net/udp.h
62011 index 3b285f4..0219639 100644
62012 --- a/include/net/udp.h
62013 +++ b/include/net/udp.h
62014 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
62015 sa_family_t family;
62016 struct udp_table *udp_table;
62017 const struct file_operations *seq_fops;
62018 - struct seq_operations seq_ops;
62019 + seq_operations_no_const seq_ops;
62020 };
62021
62022 struct udp_iter_state {
62023 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
62024 index b203e14..1df3991 100644
62025 --- a/include/net/xfrm.h
62026 +++ b/include/net/xfrm.h
62027 @@ -505,7 +505,7 @@ struct xfrm_policy {
62028 struct timer_list timer;
62029
62030 struct flow_cache_object flo;
62031 - atomic_t genid;
62032 + atomic_unchecked_t genid;
62033 u32 priority;
62034 u32 index;
62035 struct xfrm_mark mark;
62036 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
62037 index 1a046b1..ee0bef0 100644
62038 --- a/include/rdma/iw_cm.h
62039 +++ b/include/rdma/iw_cm.h
62040 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
62041 int backlog);
62042
62043 int (*destroy_listen)(struct iw_cm_id *cm_id);
62044 -};
62045 +} __no_const;
62046
62047 /**
62048 * iw_create_cm_id - Create an IW CM identifier.
62049 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
62050 index 5d1a758..1dbf795 100644
62051 --- a/include/scsi/libfc.h
62052 +++ b/include/scsi/libfc.h
62053 @@ -748,6 +748,7 @@ struct libfc_function_template {
62054 */
62055 void (*disc_stop_final) (struct fc_lport *);
62056 };
62057 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
62058
62059 /**
62060 * struct fc_disc - Discovery context
62061 @@ -851,7 +852,7 @@ struct fc_lport {
62062 struct fc_vport *vport;
62063
62064 /* Operational Information */
62065 - struct libfc_function_template tt;
62066 + libfc_function_template_no_const tt;
62067 u8 link_up;
62068 u8 qfull;
62069 enum fc_lport_state state;
62070 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
62071 index 5591ed5..13eb457 100644
62072 --- a/include/scsi/scsi_device.h
62073 +++ b/include/scsi/scsi_device.h
62074 @@ -161,9 +161,9 @@ struct scsi_device {
62075 unsigned int max_device_blocked; /* what device_blocked counts down from */
62076 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
62077
62078 - atomic_t iorequest_cnt;
62079 - atomic_t iodone_cnt;
62080 - atomic_t ioerr_cnt;
62081 + atomic_unchecked_t iorequest_cnt;
62082 + atomic_unchecked_t iodone_cnt;
62083 + atomic_unchecked_t ioerr_cnt;
62084
62085 struct device sdev_gendev,
62086 sdev_dev;
62087 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
62088 index 2a65167..91e01f8 100644
62089 --- a/include/scsi/scsi_transport_fc.h
62090 +++ b/include/scsi/scsi_transport_fc.h
62091 @@ -711,7 +711,7 @@ struct fc_function_template {
62092 unsigned long show_host_system_hostname:1;
62093
62094 unsigned long disable_target_scan:1;
62095 -};
62096 +} __do_const;
62097
62098
62099 /**
62100 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
62101 index 030b87c..98a6954 100644
62102 --- a/include/sound/ak4xxx-adda.h
62103 +++ b/include/sound/ak4xxx-adda.h
62104 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62105 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62106 unsigned char val);
62107 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62108 -};
62109 +} __no_const;
62110
62111 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62112
62113 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
62114 index 8c05e47..2b5df97 100644
62115 --- a/include/sound/hwdep.h
62116 +++ b/include/sound/hwdep.h
62117 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62118 struct snd_hwdep_dsp_status *status);
62119 int (*dsp_load)(struct snd_hwdep *hw,
62120 struct snd_hwdep_dsp_image *image);
62121 -};
62122 +} __no_const;
62123
62124 struct snd_hwdep {
62125 struct snd_card *card;
62126 diff --git a/include/sound/info.h b/include/sound/info.h
62127 index 5492cc4..1a65278 100644
62128 --- a/include/sound/info.h
62129 +++ b/include/sound/info.h
62130 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
62131 struct snd_info_buffer *buffer);
62132 void (*write)(struct snd_info_entry *entry,
62133 struct snd_info_buffer *buffer);
62134 -};
62135 +} __no_const;
62136
62137 struct snd_info_entry_ops {
62138 int (*open)(struct snd_info_entry *entry,
62139 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
62140 index 0cf91b2..b70cae4 100644
62141 --- a/include/sound/pcm.h
62142 +++ b/include/sound/pcm.h
62143 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
62144 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
62145 int (*ack)(struct snd_pcm_substream *substream);
62146 };
62147 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
62148
62149 /*
62150 *
62151 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
62152 index af1b49e..a5d55a5 100644
62153 --- a/include/sound/sb16_csp.h
62154 +++ b/include/sound/sb16_csp.h
62155 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
62156 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62157 int (*csp_stop) (struct snd_sb_csp * p);
62158 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62159 -};
62160 +} __no_const;
62161
62162 /*
62163 * CSP private data
62164 diff --git a/include/sound/soc.h b/include/sound/soc.h
62165 index 11cfb59..e3f93f4 100644
62166 --- a/include/sound/soc.h
62167 +++ b/include/sound/soc.h
62168 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
62169 /* platform IO - used for platform DAPM */
62170 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
62171 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
62172 -};
62173 +} __do_const;
62174
62175 struct snd_soc_platform {
62176 const char *name;
62177 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
62178 index 444cd6b..3327cc5 100644
62179 --- a/include/sound/ymfpci.h
62180 +++ b/include/sound/ymfpci.h
62181 @@ -358,7 +358,7 @@ struct snd_ymfpci {
62182 spinlock_t reg_lock;
62183 spinlock_t voice_lock;
62184 wait_queue_head_t interrupt_sleep;
62185 - atomic_t interrupt_sleep_count;
62186 + atomic_unchecked_t interrupt_sleep_count;
62187 struct snd_info_entry *proc_entry;
62188 const struct firmware *dsp_microcode;
62189 const struct firmware *controller_microcode;
62190 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
62191 index a79886c..b483af6 100644
62192 --- a/include/target/target_core_base.h
62193 +++ b/include/target/target_core_base.h
62194 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
62195 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
62196 int (*t10_pr_register)(struct se_cmd *);
62197 int (*t10_pr_clear)(struct se_cmd *);
62198 -};
62199 +} __no_const;
62200
62201 struct t10_reservation {
62202 /* Reservation effects all target ports */
62203 @@ -465,8 +465,8 @@ struct se_cmd {
62204 atomic_t t_se_count;
62205 atomic_t t_task_cdbs_left;
62206 atomic_t t_task_cdbs_ex_left;
62207 - atomic_t t_task_cdbs_sent;
62208 - atomic_t t_transport_aborted;
62209 + atomic_unchecked_t t_task_cdbs_sent;
62210 + atomic_unchecked_t t_transport_aborted;
62211 atomic_t t_transport_active;
62212 atomic_t t_transport_complete;
62213 atomic_t t_transport_queue_active;
62214 @@ -704,7 +704,7 @@ struct se_device {
62215 /* Active commands on this virtual SE device */
62216 atomic_t simple_cmds;
62217 atomic_t depth_left;
62218 - atomic_t dev_ordered_id;
62219 + atomic_unchecked_t dev_ordered_id;
62220 atomic_t execute_tasks;
62221 atomic_t dev_ordered_sync;
62222 atomic_t dev_qf_count;
62223 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
62224 index 1c09820..7f5ec79 100644
62225 --- a/include/trace/events/irq.h
62226 +++ b/include/trace/events/irq.h
62227 @@ -36,7 +36,7 @@ struct softirq_action;
62228 */
62229 TRACE_EVENT(irq_handler_entry,
62230
62231 - TP_PROTO(int irq, struct irqaction *action),
62232 + TP_PROTO(int irq, const struct irqaction *action),
62233
62234 TP_ARGS(irq, action),
62235
62236 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
62237 */
62238 TRACE_EVENT(irq_handler_exit,
62239
62240 - TP_PROTO(int irq, struct irqaction *action, int ret),
62241 + TP_PROTO(int irq, const struct irqaction *action, int ret),
62242
62243 TP_ARGS(irq, action, ret),
62244
62245 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
62246 index c41f308..6918de3 100644
62247 --- a/include/video/udlfb.h
62248 +++ b/include/video/udlfb.h
62249 @@ -52,10 +52,10 @@ struct dlfb_data {
62250 u32 pseudo_palette[256];
62251 int blank_mode; /*one of FB_BLANK_ */
62252 /* blit-only rendering path metrics, exposed through sysfs */
62253 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62254 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62255 - atomic_t bytes_sent; /* to usb, after compression including overhead */
62256 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62257 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62258 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62259 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62260 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62261 };
62262
62263 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62264 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62265 index 0993a22..32ba2fe 100644
62266 --- a/include/video/uvesafb.h
62267 +++ b/include/video/uvesafb.h
62268 @@ -177,6 +177,7 @@ struct uvesafb_par {
62269 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62270 u8 pmi_setpal; /* PMI for palette changes */
62271 u16 *pmi_base; /* protected mode interface location */
62272 + u8 *pmi_code; /* protected mode code location */
62273 void *pmi_start;
62274 void *pmi_pal;
62275 u8 *vbe_state_orig; /*
62276 diff --git a/init/Kconfig b/init/Kconfig
62277 index 43298f9..2f56c12 100644
62278 --- a/init/Kconfig
62279 +++ b/init/Kconfig
62280 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62281
62282 config COMPAT_BRK
62283 bool "Disable heap randomization"
62284 - default y
62285 + default n
62286 help
62287 Randomizing heap placement makes heap exploits harder, but it
62288 also breaks ancient binaries (including anything libc5 based).
62289 diff --git a/init/do_mounts.c b/init/do_mounts.c
62290 index db6e5ee..7677ff7 100644
62291 --- a/init/do_mounts.c
62292 +++ b/init/do_mounts.c
62293 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62294
62295 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62296 {
62297 - int err = sys_mount(name, "/root", fs, flags, data);
62298 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62299 if (err)
62300 return err;
62301
62302 - sys_chdir((const char __user __force *)"/root");
62303 + sys_chdir((const char __force_user*)"/root");
62304 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62305 printk(KERN_INFO
62306 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62307 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62308 va_start(args, fmt);
62309 vsprintf(buf, fmt, args);
62310 va_end(args);
62311 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62312 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62313 if (fd >= 0) {
62314 sys_ioctl(fd, FDEJECT, 0);
62315 sys_close(fd);
62316 }
62317 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62318 - fd = sys_open("/dev/console", O_RDWR, 0);
62319 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62320 if (fd >= 0) {
62321 sys_ioctl(fd, TCGETS, (long)&termios);
62322 termios.c_lflag &= ~ICANON;
62323 sys_ioctl(fd, TCSETSF, (long)&termios);
62324 - sys_read(fd, &c, 1);
62325 + sys_read(fd, (char __user *)&c, 1);
62326 termios.c_lflag |= ICANON;
62327 sys_ioctl(fd, TCSETSF, (long)&termios);
62328 sys_close(fd);
62329 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62330 mount_root();
62331 out:
62332 devtmpfs_mount("dev");
62333 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62334 - sys_chroot((const char __user __force *)".");
62335 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62336 + sys_chroot((const char __force_user *)".");
62337 }
62338 diff --git a/init/do_mounts.h b/init/do_mounts.h
62339 index f5b978a..69dbfe8 100644
62340 --- a/init/do_mounts.h
62341 +++ b/init/do_mounts.h
62342 @@ -15,15 +15,15 @@ extern int root_mountflags;
62343
62344 static inline int create_dev(char *name, dev_t dev)
62345 {
62346 - sys_unlink(name);
62347 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62348 + sys_unlink((char __force_user *)name);
62349 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62350 }
62351
62352 #if BITS_PER_LONG == 32
62353 static inline u32 bstat(char *name)
62354 {
62355 struct stat64 stat;
62356 - if (sys_stat64(name, &stat) != 0)
62357 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62358 return 0;
62359 if (!S_ISBLK(stat.st_mode))
62360 return 0;
62361 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62362 static inline u32 bstat(char *name)
62363 {
62364 struct stat stat;
62365 - if (sys_newstat(name, &stat) != 0)
62366 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62367 return 0;
62368 if (!S_ISBLK(stat.st_mode))
62369 return 0;
62370 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62371 index 3098a38..253064e 100644
62372 --- a/init/do_mounts_initrd.c
62373 +++ b/init/do_mounts_initrd.c
62374 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62375 create_dev("/dev/root.old", Root_RAM0);
62376 /* mount initrd on rootfs' /root */
62377 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62378 - sys_mkdir("/old", 0700);
62379 - root_fd = sys_open("/", 0, 0);
62380 - old_fd = sys_open("/old", 0, 0);
62381 + sys_mkdir((const char __force_user *)"/old", 0700);
62382 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
62383 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62384 /* move initrd over / and chdir/chroot in initrd root */
62385 - sys_chdir("/root");
62386 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62387 - sys_chroot(".");
62388 + sys_chdir((const char __force_user *)"/root");
62389 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62390 + sys_chroot((const char __force_user *)".");
62391
62392 /*
62393 * In case that a resume from disk is carried out by linuxrc or one of
62394 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62395
62396 /* move initrd to rootfs' /old */
62397 sys_fchdir(old_fd);
62398 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62399 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62400 /* switch root and cwd back to / of rootfs */
62401 sys_fchdir(root_fd);
62402 - sys_chroot(".");
62403 + sys_chroot((const char __force_user *)".");
62404 sys_close(old_fd);
62405 sys_close(root_fd);
62406
62407 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62408 - sys_chdir("/old");
62409 + sys_chdir((const char __force_user *)"/old");
62410 return;
62411 }
62412
62413 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62414 mount_root();
62415
62416 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62417 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62418 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62419 if (!error)
62420 printk("okay\n");
62421 else {
62422 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62423 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62424 if (error == -ENOENT)
62425 printk("/initrd does not exist. Ignored.\n");
62426 else
62427 printk("failed\n");
62428 printk(KERN_NOTICE "Unmounting old root\n");
62429 - sys_umount("/old", MNT_DETACH);
62430 + sys_umount((char __force_user *)"/old", MNT_DETACH);
62431 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62432 if (fd < 0) {
62433 error = fd;
62434 @@ -116,11 +116,11 @@ int __init initrd_load(void)
62435 * mounted in the normal path.
62436 */
62437 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62438 - sys_unlink("/initrd.image");
62439 + sys_unlink((const char __force_user *)"/initrd.image");
62440 handle_initrd();
62441 return 1;
62442 }
62443 }
62444 - sys_unlink("/initrd.image");
62445 + sys_unlink((const char __force_user *)"/initrd.image");
62446 return 0;
62447 }
62448 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62449 index 32c4799..c27ee74 100644
62450 --- a/init/do_mounts_md.c
62451 +++ b/init/do_mounts_md.c
62452 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62453 partitioned ? "_d" : "", minor,
62454 md_setup_args[ent].device_names);
62455
62456 - fd = sys_open(name, 0, 0);
62457 + fd = sys_open((char __force_user *)name, 0, 0);
62458 if (fd < 0) {
62459 printk(KERN_ERR "md: open failed - cannot start "
62460 "array %s\n", name);
62461 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62462 * array without it
62463 */
62464 sys_close(fd);
62465 - fd = sys_open(name, 0, 0);
62466 + fd = sys_open((char __force_user *)name, 0, 0);
62467 sys_ioctl(fd, BLKRRPART, 0);
62468 }
62469 sys_close(fd);
62470 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62471
62472 wait_for_device_probe();
62473
62474 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62475 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62476 if (fd >= 0) {
62477 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62478 sys_close(fd);
62479 diff --git a/init/initramfs.c b/init/initramfs.c
62480 index 2531811..040d4d4 100644
62481 --- a/init/initramfs.c
62482 +++ b/init/initramfs.c
62483 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62484 }
62485 }
62486
62487 -static long __init do_utime(char __user *filename, time_t mtime)
62488 +static long __init do_utime(__force char __user *filename, time_t mtime)
62489 {
62490 struct timespec t[2];
62491
62492 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62493 struct dir_entry *de, *tmp;
62494 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62495 list_del(&de->list);
62496 - do_utime(de->name, de->mtime);
62497 + do_utime((char __force_user *)de->name, de->mtime);
62498 kfree(de->name);
62499 kfree(de);
62500 }
62501 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62502 if (nlink >= 2) {
62503 char *old = find_link(major, minor, ino, mode, collected);
62504 if (old)
62505 - return (sys_link(old, collected) < 0) ? -1 : 1;
62506 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62507 }
62508 return 0;
62509 }
62510 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62511 {
62512 struct stat st;
62513
62514 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62515 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62516 if (S_ISDIR(st.st_mode))
62517 - sys_rmdir(path);
62518 + sys_rmdir((char __force_user *)path);
62519 else
62520 - sys_unlink(path);
62521 + sys_unlink((char __force_user *)path);
62522 }
62523 }
62524
62525 @@ -305,7 +305,7 @@ static int __init do_name(void)
62526 int openflags = O_WRONLY|O_CREAT;
62527 if (ml != 1)
62528 openflags |= O_TRUNC;
62529 - wfd = sys_open(collected, openflags, mode);
62530 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62531
62532 if (wfd >= 0) {
62533 sys_fchown(wfd, uid, gid);
62534 @@ -317,17 +317,17 @@ static int __init do_name(void)
62535 }
62536 }
62537 } else if (S_ISDIR(mode)) {
62538 - sys_mkdir(collected, mode);
62539 - sys_chown(collected, uid, gid);
62540 - sys_chmod(collected, mode);
62541 + sys_mkdir((char __force_user *)collected, mode);
62542 + sys_chown((char __force_user *)collected, uid, gid);
62543 + sys_chmod((char __force_user *)collected, mode);
62544 dir_add(collected, mtime);
62545 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62546 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62547 if (maybe_link() == 0) {
62548 - sys_mknod(collected, mode, rdev);
62549 - sys_chown(collected, uid, gid);
62550 - sys_chmod(collected, mode);
62551 - do_utime(collected, mtime);
62552 + sys_mknod((char __force_user *)collected, mode, rdev);
62553 + sys_chown((char __force_user *)collected, uid, gid);
62554 + sys_chmod((char __force_user *)collected, mode);
62555 + do_utime((char __force_user *)collected, mtime);
62556 }
62557 }
62558 return 0;
62559 @@ -336,15 +336,15 @@ static int __init do_name(void)
62560 static int __init do_copy(void)
62561 {
62562 if (count >= body_len) {
62563 - sys_write(wfd, victim, body_len);
62564 + sys_write(wfd, (char __force_user *)victim, body_len);
62565 sys_close(wfd);
62566 - do_utime(vcollected, mtime);
62567 + do_utime((char __force_user *)vcollected, mtime);
62568 kfree(vcollected);
62569 eat(body_len);
62570 state = SkipIt;
62571 return 0;
62572 } else {
62573 - sys_write(wfd, victim, count);
62574 + sys_write(wfd, (char __force_user *)victim, count);
62575 body_len -= count;
62576 eat(count);
62577 return 1;
62578 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62579 {
62580 collected[N_ALIGN(name_len) + body_len] = '\0';
62581 clean_path(collected, 0);
62582 - sys_symlink(collected + N_ALIGN(name_len), collected);
62583 - sys_lchown(collected, uid, gid);
62584 - do_utime(collected, mtime);
62585 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62586 + sys_lchown((char __force_user *)collected, uid, gid);
62587 + do_utime((char __force_user *)collected, mtime);
62588 state = SkipIt;
62589 next_state = Reset;
62590 return 0;
62591 diff --git a/init/main.c b/init/main.c
62592 index 217ed23..ec5406f 100644
62593 --- a/init/main.c
62594 +++ b/init/main.c
62595 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62596 extern void tc_init(void);
62597 #endif
62598
62599 +extern void grsecurity_init(void);
62600 +
62601 /*
62602 * Debug helper: via this flag we know that we are in 'early bootup code'
62603 * where only the boot processor is running with IRQ disabled. This means
62604 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62605
62606 __setup("reset_devices", set_reset_devices);
62607
62608 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62609 +extern char pax_enter_kernel_user[];
62610 +extern char pax_exit_kernel_user[];
62611 +extern pgdval_t clone_pgd_mask;
62612 +#endif
62613 +
62614 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62615 +static int __init setup_pax_nouderef(char *str)
62616 +{
62617 +#ifdef CONFIG_X86_32
62618 + unsigned int cpu;
62619 + struct desc_struct *gdt;
62620 +
62621 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62622 + gdt = get_cpu_gdt_table(cpu);
62623 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62624 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62625 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62626 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62627 + }
62628 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62629 +#else
62630 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62631 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62632 + clone_pgd_mask = ~(pgdval_t)0UL;
62633 +#endif
62634 +
62635 + return 0;
62636 +}
62637 +early_param("pax_nouderef", setup_pax_nouderef);
62638 +#endif
62639 +
62640 +#ifdef CONFIG_PAX_SOFTMODE
62641 +int pax_softmode;
62642 +
62643 +static int __init setup_pax_softmode(char *str)
62644 +{
62645 + get_option(&str, &pax_softmode);
62646 + return 1;
62647 +}
62648 +__setup("pax_softmode=", setup_pax_softmode);
62649 +#endif
62650 +
62651 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62652 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62653 static const char *panic_later, *panic_param;
62654 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62655 {
62656 int count = preempt_count();
62657 int ret;
62658 + const char *msg1 = "", *msg2 = "";
62659
62660 if (initcall_debug)
62661 ret = do_one_initcall_debug(fn);
62662 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62663 sprintf(msgbuf, "error code %d ", ret);
62664
62665 if (preempt_count() != count) {
62666 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62667 + msg1 = " preemption imbalance";
62668 preempt_count() = count;
62669 }
62670 if (irqs_disabled()) {
62671 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62672 + msg2 = " disabled interrupts";
62673 local_irq_enable();
62674 }
62675 - if (msgbuf[0]) {
62676 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62677 + if (msgbuf[0] || *msg1 || *msg2) {
62678 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62679 }
62680
62681 return ret;
62682 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62683 do_basic_setup();
62684
62685 /* Open the /dev/console on the rootfs, this should never fail */
62686 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62687 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62688 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62689
62690 (void) sys_dup(0);
62691 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62692 if (!ramdisk_execute_command)
62693 ramdisk_execute_command = "/init";
62694
62695 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62696 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62697 ramdisk_execute_command = NULL;
62698 prepare_namespace();
62699 }
62700
62701 + grsecurity_init();
62702 +
62703 /*
62704 * Ok, we have completed the initial bootup, and
62705 * we're essentially up and running. Get rid of the
62706 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62707 index 5b4293d..f179875 100644
62708 --- a/ipc/mqueue.c
62709 +++ b/ipc/mqueue.c
62710 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62711 mq_bytes = (mq_msg_tblsz +
62712 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62713
62714 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62715 spin_lock(&mq_lock);
62716 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62717 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62718 diff --git a/ipc/msg.c b/ipc/msg.c
62719 index 7385de2..a8180e0 100644
62720 --- a/ipc/msg.c
62721 +++ b/ipc/msg.c
62722 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62723 return security_msg_queue_associate(msq, msgflg);
62724 }
62725
62726 +static struct ipc_ops msg_ops = {
62727 + .getnew = newque,
62728 + .associate = msg_security,
62729 + .more_checks = NULL
62730 +};
62731 +
62732 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62733 {
62734 struct ipc_namespace *ns;
62735 - struct ipc_ops msg_ops;
62736 struct ipc_params msg_params;
62737
62738 ns = current->nsproxy->ipc_ns;
62739
62740 - msg_ops.getnew = newque;
62741 - msg_ops.associate = msg_security;
62742 - msg_ops.more_checks = NULL;
62743 -
62744 msg_params.key = key;
62745 msg_params.flg = msgflg;
62746
62747 diff --git a/ipc/sem.c b/ipc/sem.c
62748 index 5215a81..cfc0cac 100644
62749 --- a/ipc/sem.c
62750 +++ b/ipc/sem.c
62751 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62752 return 0;
62753 }
62754
62755 +static struct ipc_ops sem_ops = {
62756 + .getnew = newary,
62757 + .associate = sem_security,
62758 + .more_checks = sem_more_checks
62759 +};
62760 +
62761 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62762 {
62763 struct ipc_namespace *ns;
62764 - struct ipc_ops sem_ops;
62765 struct ipc_params sem_params;
62766
62767 ns = current->nsproxy->ipc_ns;
62768 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62769 if (nsems < 0 || nsems > ns->sc_semmsl)
62770 return -EINVAL;
62771
62772 - sem_ops.getnew = newary;
62773 - sem_ops.associate = sem_security;
62774 - sem_ops.more_checks = sem_more_checks;
62775 -
62776 sem_params.key = key;
62777 sem_params.flg = semflg;
62778 sem_params.u.nsems = nsems;
62779 diff --git a/ipc/shm.c b/ipc/shm.c
62780 index b76be5b..859e750 100644
62781 --- a/ipc/shm.c
62782 +++ b/ipc/shm.c
62783 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62784 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62785 #endif
62786
62787 +#ifdef CONFIG_GRKERNSEC
62788 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62789 + const time_t shm_createtime, const uid_t cuid,
62790 + const int shmid);
62791 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62792 + const time_t shm_createtime);
62793 +#endif
62794 +
62795 void shm_init_ns(struct ipc_namespace *ns)
62796 {
62797 ns->shm_ctlmax = SHMMAX;
62798 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62799 shp->shm_lprid = 0;
62800 shp->shm_atim = shp->shm_dtim = 0;
62801 shp->shm_ctim = get_seconds();
62802 +#ifdef CONFIG_GRKERNSEC
62803 + {
62804 + struct timespec timeval;
62805 + do_posix_clock_monotonic_gettime(&timeval);
62806 +
62807 + shp->shm_createtime = timeval.tv_sec;
62808 + }
62809 +#endif
62810 shp->shm_segsz = size;
62811 shp->shm_nattch = 0;
62812 shp->shm_file = file;
62813 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62814 return 0;
62815 }
62816
62817 +static struct ipc_ops shm_ops = {
62818 + .getnew = newseg,
62819 + .associate = shm_security,
62820 + .more_checks = shm_more_checks
62821 +};
62822 +
62823 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62824 {
62825 struct ipc_namespace *ns;
62826 - struct ipc_ops shm_ops;
62827 struct ipc_params shm_params;
62828
62829 ns = current->nsproxy->ipc_ns;
62830
62831 - shm_ops.getnew = newseg;
62832 - shm_ops.associate = shm_security;
62833 - shm_ops.more_checks = shm_more_checks;
62834 -
62835 shm_params.key = key;
62836 shm_params.flg = shmflg;
62837 shm_params.u.size = size;
62838 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62839 f_mode = FMODE_READ | FMODE_WRITE;
62840 }
62841 if (shmflg & SHM_EXEC) {
62842 +
62843 +#ifdef CONFIG_PAX_MPROTECT
62844 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62845 + goto out;
62846 +#endif
62847 +
62848 prot |= PROT_EXEC;
62849 acc_mode |= S_IXUGO;
62850 }
62851 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62852 if (err)
62853 goto out_unlock;
62854
62855 +#ifdef CONFIG_GRKERNSEC
62856 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62857 + shp->shm_perm.cuid, shmid) ||
62858 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62859 + err = -EACCES;
62860 + goto out_unlock;
62861 + }
62862 +#endif
62863 +
62864 path = shp->shm_file->f_path;
62865 path_get(&path);
62866 shp->shm_nattch++;
62867 +#ifdef CONFIG_GRKERNSEC
62868 + shp->shm_lapid = current->pid;
62869 +#endif
62870 size = i_size_read(path.dentry->d_inode);
62871 shm_unlock(shp);
62872
62873 diff --git a/kernel/acct.c b/kernel/acct.c
62874 index fa7eb3d..7faf116 100644
62875 --- a/kernel/acct.c
62876 +++ b/kernel/acct.c
62877 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62878 */
62879 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62880 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62881 - file->f_op->write(file, (char *)&ac,
62882 + file->f_op->write(file, (char __force_user *)&ac,
62883 sizeof(acct_t), &file->f_pos);
62884 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62885 set_fs(fs);
62886 diff --git a/kernel/audit.c b/kernel/audit.c
62887 index 09fae26..ed71d5b 100644
62888 --- a/kernel/audit.c
62889 +++ b/kernel/audit.c
62890 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62891 3) suppressed due to audit_rate_limit
62892 4) suppressed due to audit_backlog_limit
62893 */
62894 -static atomic_t audit_lost = ATOMIC_INIT(0);
62895 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62896
62897 /* The netlink socket. */
62898 static struct sock *audit_sock;
62899 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62900 unsigned long now;
62901 int print;
62902
62903 - atomic_inc(&audit_lost);
62904 + atomic_inc_unchecked(&audit_lost);
62905
62906 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62907
62908 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62909 printk(KERN_WARNING
62910 "audit: audit_lost=%d audit_rate_limit=%d "
62911 "audit_backlog_limit=%d\n",
62912 - atomic_read(&audit_lost),
62913 + atomic_read_unchecked(&audit_lost),
62914 audit_rate_limit,
62915 audit_backlog_limit);
62916 audit_panic(message);
62917 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62918 status_set.pid = audit_pid;
62919 status_set.rate_limit = audit_rate_limit;
62920 status_set.backlog_limit = audit_backlog_limit;
62921 - status_set.lost = atomic_read(&audit_lost);
62922 + status_set.lost = atomic_read_unchecked(&audit_lost);
62923 status_set.backlog = skb_queue_len(&audit_skb_queue);
62924 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62925 &status_set, sizeof(status_set));
62926 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62927 avail = audit_expand(ab,
62928 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62929 if (!avail)
62930 - goto out;
62931 + goto out_va_end;
62932 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62933 }
62934 - va_end(args2);
62935 if (len > 0)
62936 skb_put(skb, len);
62937 +out_va_end:
62938 + va_end(args2);
62939 out:
62940 return;
62941 }
62942 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62943 index 47b7fc1..c003c33 100644
62944 --- a/kernel/auditsc.c
62945 +++ b/kernel/auditsc.c
62946 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62947 struct audit_buffer **ab,
62948 struct audit_aux_data_execve *axi)
62949 {
62950 - int i;
62951 - size_t len, len_sent = 0;
62952 + int i, len;
62953 + size_t len_sent = 0;
62954 const char __user *p;
62955 char *buf;
62956
62957 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62958 }
62959
62960 /* global counter which is incremented every time something logs in */
62961 -static atomic_t session_id = ATOMIC_INIT(0);
62962 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62963
62964 /**
62965 * audit_set_loginuid - set a task's audit_context loginuid
62966 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62967 */
62968 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62969 {
62970 - unsigned int sessionid = atomic_inc_return(&session_id);
62971 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62972 struct audit_context *context = task->audit_context;
62973
62974 if (context && context->in_syscall) {
62975 diff --git a/kernel/capability.c b/kernel/capability.c
62976 index b463871..fa3ea1f 100644
62977 --- a/kernel/capability.c
62978 +++ b/kernel/capability.c
62979 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62980 * before modification is attempted and the application
62981 * fails.
62982 */
62983 + if (tocopy > ARRAY_SIZE(kdata))
62984 + return -EFAULT;
62985 +
62986 if (copy_to_user(dataptr, kdata, tocopy
62987 * sizeof(struct __user_cap_data_struct))) {
62988 return -EFAULT;
62989 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62990 BUG();
62991 }
62992
62993 - if (security_capable(ns, current_cred(), cap) == 0) {
62994 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62995 current->flags |= PF_SUPERPRIV;
62996 return true;
62997 }
62998 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62999 }
63000 EXPORT_SYMBOL(ns_capable);
63001
63002 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
63003 +{
63004 + if (unlikely(!cap_valid(cap))) {
63005 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
63006 + BUG();
63007 + }
63008 +
63009 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
63010 + current->flags |= PF_SUPERPRIV;
63011 + return true;
63012 + }
63013 + return false;
63014 +}
63015 +EXPORT_SYMBOL(ns_capable_nolog);
63016 +
63017 +bool capable_nolog(int cap)
63018 +{
63019 + return ns_capable_nolog(&init_user_ns, cap);
63020 +}
63021 +EXPORT_SYMBOL(capable_nolog);
63022 +
63023 /**
63024 * task_ns_capable - Determine whether current task has a superior
63025 * capability targeted at a specific task's user namespace.
63026 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
63027 }
63028 EXPORT_SYMBOL(task_ns_capable);
63029
63030 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
63031 +{
63032 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
63033 +}
63034 +EXPORT_SYMBOL(task_ns_capable_nolog);
63035 +
63036 /**
63037 * nsown_capable - Check superior capability to one's own user_ns
63038 * @cap: The capability in question
63039 diff --git a/kernel/compat.c b/kernel/compat.c
63040 index f346ced..aa2b1f4 100644
63041 --- a/kernel/compat.c
63042 +++ b/kernel/compat.c
63043 @@ -13,6 +13,7 @@
63044
63045 #include <linux/linkage.h>
63046 #include <linux/compat.h>
63047 +#include <linux/module.h>
63048 #include <linux/errno.h>
63049 #include <linux/time.h>
63050 #include <linux/signal.h>
63051 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
63052 mm_segment_t oldfs;
63053 long ret;
63054
63055 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
63056 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
63057 oldfs = get_fs();
63058 set_fs(KERNEL_DS);
63059 ret = hrtimer_nanosleep_restart(restart);
63060 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
63061 oldfs = get_fs();
63062 set_fs(KERNEL_DS);
63063 ret = hrtimer_nanosleep(&tu,
63064 - rmtp ? (struct timespec __user *)&rmt : NULL,
63065 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
63066 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
63067 set_fs(oldfs);
63068
63069 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
63070 mm_segment_t old_fs = get_fs();
63071
63072 set_fs(KERNEL_DS);
63073 - ret = sys_sigpending((old_sigset_t __user *) &s);
63074 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
63075 set_fs(old_fs);
63076 if (ret == 0)
63077 ret = put_user(s, set);
63078 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
63079 old_fs = get_fs();
63080 set_fs(KERNEL_DS);
63081 ret = sys_sigprocmask(how,
63082 - set ? (old_sigset_t __user *) &s : NULL,
63083 - oset ? (old_sigset_t __user *) &s : NULL);
63084 + set ? (old_sigset_t __force_user *) &s : NULL,
63085 + oset ? (old_sigset_t __force_user *) &s : NULL);
63086 set_fs(old_fs);
63087 if (ret == 0)
63088 if (oset)
63089 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
63090 mm_segment_t old_fs = get_fs();
63091
63092 set_fs(KERNEL_DS);
63093 - ret = sys_old_getrlimit(resource, &r);
63094 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
63095 set_fs(old_fs);
63096
63097 if (!ret) {
63098 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
63099 mm_segment_t old_fs = get_fs();
63100
63101 set_fs(KERNEL_DS);
63102 - ret = sys_getrusage(who, (struct rusage __user *) &r);
63103 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
63104 set_fs(old_fs);
63105
63106 if (ret)
63107 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
63108 set_fs (KERNEL_DS);
63109 ret = sys_wait4(pid,
63110 (stat_addr ?
63111 - (unsigned int __user *) &status : NULL),
63112 - options, (struct rusage __user *) &r);
63113 + (unsigned int __force_user *) &status : NULL),
63114 + options, (struct rusage __force_user *) &r);
63115 set_fs (old_fs);
63116
63117 if (ret > 0) {
63118 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
63119 memset(&info, 0, sizeof(info));
63120
63121 set_fs(KERNEL_DS);
63122 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
63123 - uru ? (struct rusage __user *)&ru : NULL);
63124 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
63125 + uru ? (struct rusage __force_user *)&ru : NULL);
63126 set_fs(old_fs);
63127
63128 if ((ret < 0) || (info.si_signo == 0))
63129 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
63130 oldfs = get_fs();
63131 set_fs(KERNEL_DS);
63132 err = sys_timer_settime(timer_id, flags,
63133 - (struct itimerspec __user *) &newts,
63134 - (struct itimerspec __user *) &oldts);
63135 + (struct itimerspec __force_user *) &newts,
63136 + (struct itimerspec __force_user *) &oldts);
63137 set_fs(oldfs);
63138 if (!err && old && put_compat_itimerspec(old, &oldts))
63139 return -EFAULT;
63140 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
63141 oldfs = get_fs();
63142 set_fs(KERNEL_DS);
63143 err = sys_timer_gettime(timer_id,
63144 - (struct itimerspec __user *) &ts);
63145 + (struct itimerspec __force_user *) &ts);
63146 set_fs(oldfs);
63147 if (!err && put_compat_itimerspec(setting, &ts))
63148 return -EFAULT;
63149 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
63150 oldfs = get_fs();
63151 set_fs(KERNEL_DS);
63152 err = sys_clock_settime(which_clock,
63153 - (struct timespec __user *) &ts);
63154 + (struct timespec __force_user *) &ts);
63155 set_fs(oldfs);
63156 return err;
63157 }
63158 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
63159 oldfs = get_fs();
63160 set_fs(KERNEL_DS);
63161 err = sys_clock_gettime(which_clock,
63162 - (struct timespec __user *) &ts);
63163 + (struct timespec __force_user *) &ts);
63164 set_fs(oldfs);
63165 if (!err && put_compat_timespec(&ts, tp))
63166 return -EFAULT;
63167 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
63168
63169 oldfs = get_fs();
63170 set_fs(KERNEL_DS);
63171 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
63172 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
63173 set_fs(oldfs);
63174
63175 err = compat_put_timex(utp, &txc);
63176 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
63177 oldfs = get_fs();
63178 set_fs(KERNEL_DS);
63179 err = sys_clock_getres(which_clock,
63180 - (struct timespec __user *) &ts);
63181 + (struct timespec __force_user *) &ts);
63182 set_fs(oldfs);
63183 if (!err && tp && put_compat_timespec(&ts, tp))
63184 return -EFAULT;
63185 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
63186 long err;
63187 mm_segment_t oldfs;
63188 struct timespec tu;
63189 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
63190 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
63191
63192 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
63193 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
63194 oldfs = get_fs();
63195 set_fs(KERNEL_DS);
63196 err = clock_nanosleep_restart(restart);
63197 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
63198 oldfs = get_fs();
63199 set_fs(KERNEL_DS);
63200 err = sys_clock_nanosleep(which_clock, flags,
63201 - (struct timespec __user *) &in,
63202 - (struct timespec __user *) &out);
63203 + (struct timespec __force_user *) &in,
63204 + (struct timespec __force_user *) &out);
63205 set_fs(oldfs);
63206
63207 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
63208 diff --git a/kernel/configs.c b/kernel/configs.c
63209 index 42e8fa0..9e7406b 100644
63210 --- a/kernel/configs.c
63211 +++ b/kernel/configs.c
63212 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
63213 struct proc_dir_entry *entry;
63214
63215 /* create the current config file */
63216 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63217 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
63218 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
63219 + &ikconfig_file_ops);
63220 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63221 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
63222 + &ikconfig_file_ops);
63223 +#endif
63224 +#else
63225 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
63226 &ikconfig_file_ops);
63227 +#endif
63228 +
63229 if (!entry)
63230 return -ENOMEM;
63231
63232 diff --git a/kernel/cred.c b/kernel/cred.c
63233 index 5791612..a3c04dc 100644
63234 --- a/kernel/cred.c
63235 +++ b/kernel/cred.c
63236 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
63237 validate_creds(cred);
63238 put_cred(cred);
63239 }
63240 +
63241 +#ifdef CONFIG_GRKERNSEC_SETXID
63242 + cred = (struct cred *) tsk->delayed_cred;
63243 + if (cred) {
63244 + tsk->delayed_cred = NULL;
63245 + validate_creds(cred);
63246 + put_cred(cred);
63247 + }
63248 +#endif
63249 }
63250
63251 /**
63252 @@ -470,7 +479,7 @@ error_put:
63253 * Always returns 0 thus allowing this function to be tail-called at the end
63254 * of, say, sys_setgid().
63255 */
63256 -int commit_creds(struct cred *new)
63257 +static int __commit_creds(struct cred *new)
63258 {
63259 struct task_struct *task = current;
63260 const struct cred *old = task->real_cred;
63261 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63262
63263 get_cred(new); /* we will require a ref for the subj creds too */
63264
63265 + gr_set_role_label(task, new->uid, new->gid);
63266 +
63267 /* dumpability changes */
63268 if (old->euid != new->euid ||
63269 old->egid != new->egid ||
63270 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63271 put_cred(old);
63272 return 0;
63273 }
63274 +#ifdef CONFIG_GRKERNSEC_SETXID
63275 +extern int set_user(struct cred *new);
63276 +
63277 +void gr_delayed_cred_worker(void)
63278 +{
63279 + const struct cred *new = current->delayed_cred;
63280 + struct cred *ncred;
63281 +
63282 + current->delayed_cred = NULL;
63283 +
63284 + if (current_uid() && new != NULL) {
63285 + // from doing get_cred on it when queueing this
63286 + put_cred(new);
63287 + return;
63288 + } else if (new == NULL)
63289 + return;
63290 +
63291 + ncred = prepare_creds();
63292 + if (!ncred)
63293 + goto die;
63294 + // uids
63295 + ncred->uid = new->uid;
63296 + ncred->euid = new->euid;
63297 + ncred->suid = new->suid;
63298 + ncred->fsuid = new->fsuid;
63299 + // gids
63300 + ncred->gid = new->gid;
63301 + ncred->egid = new->egid;
63302 + ncred->sgid = new->sgid;
63303 + ncred->fsgid = new->fsgid;
63304 + // groups
63305 + if (set_groups(ncred, new->group_info) < 0) {
63306 + abort_creds(ncred);
63307 + goto die;
63308 + }
63309 + // caps
63310 + ncred->securebits = new->securebits;
63311 + ncred->cap_inheritable = new->cap_inheritable;
63312 + ncred->cap_permitted = new->cap_permitted;
63313 + ncred->cap_effective = new->cap_effective;
63314 + ncred->cap_bset = new->cap_bset;
63315 +
63316 + if (set_user(ncred)) {
63317 + abort_creds(ncred);
63318 + goto die;
63319 + }
63320 +
63321 + // from doing get_cred on it when queueing this
63322 + put_cred(new);
63323 +
63324 + __commit_creds(ncred);
63325 + return;
63326 +die:
63327 + // from doing get_cred on it when queueing this
63328 + put_cred(new);
63329 + do_group_exit(SIGKILL);
63330 +}
63331 +#endif
63332 +
63333 +int commit_creds(struct cred *new)
63334 +{
63335 +#ifdef CONFIG_GRKERNSEC_SETXID
63336 + struct task_struct *t;
63337 +
63338 + /* we won't get called with tasklist_lock held for writing
63339 + and interrupts disabled as the cred struct in that case is
63340 + init_cred
63341 + */
63342 + if (grsec_enable_setxid && !current_is_single_threaded() &&
63343 + !current_uid() && new->uid) {
63344 + rcu_read_lock();
63345 + read_lock(&tasklist_lock);
63346 + for (t = next_thread(current); t != current;
63347 + t = next_thread(t)) {
63348 + if (t->delayed_cred == NULL) {
63349 + t->delayed_cred = get_cred(new);
63350 + set_tsk_need_resched(t);
63351 + }
63352 + }
63353 + read_unlock(&tasklist_lock);
63354 + rcu_read_unlock();
63355 + }
63356 +#endif
63357 + return __commit_creds(new);
63358 +}
63359 +
63360 EXPORT_SYMBOL(commit_creds);
63361
63362 /**
63363 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63364 index 0d7c087..01b8cef 100644
63365 --- a/kernel/debug/debug_core.c
63366 +++ b/kernel/debug/debug_core.c
63367 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63368 */
63369 static atomic_t masters_in_kgdb;
63370 static atomic_t slaves_in_kgdb;
63371 -static atomic_t kgdb_break_tasklet_var;
63372 +static atomic_unchecked_t kgdb_break_tasklet_var;
63373 atomic_t kgdb_setting_breakpoint;
63374
63375 struct task_struct *kgdb_usethread;
63376 @@ -129,7 +129,7 @@ int kgdb_single_step;
63377 static pid_t kgdb_sstep_pid;
63378
63379 /* to keep track of the CPU which is doing the single stepping*/
63380 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63381 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63382
63383 /*
63384 * If you are debugging a problem where roundup (the collection of
63385 @@ -542,7 +542,7 @@ return_normal:
63386 * kernel will only try for the value of sstep_tries before
63387 * giving up and continuing on.
63388 */
63389 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63390 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63391 (kgdb_info[cpu].task &&
63392 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63393 atomic_set(&kgdb_active, -1);
63394 @@ -636,8 +636,8 @@ cpu_master_loop:
63395 }
63396
63397 kgdb_restore:
63398 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63399 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63400 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63401 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63402 if (kgdb_info[sstep_cpu].task)
63403 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63404 else
63405 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63406 static void kgdb_tasklet_bpt(unsigned long ing)
63407 {
63408 kgdb_breakpoint();
63409 - atomic_set(&kgdb_break_tasklet_var, 0);
63410 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63411 }
63412
63413 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63414
63415 void kgdb_schedule_breakpoint(void)
63416 {
63417 - if (atomic_read(&kgdb_break_tasklet_var) ||
63418 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63419 atomic_read(&kgdb_active) != -1 ||
63420 atomic_read(&kgdb_setting_breakpoint))
63421 return;
63422 - atomic_inc(&kgdb_break_tasklet_var);
63423 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
63424 tasklet_schedule(&kgdb_tasklet_breakpoint);
63425 }
63426 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63427 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63428 index 63786e7..0780cac 100644
63429 --- a/kernel/debug/kdb/kdb_main.c
63430 +++ b/kernel/debug/kdb/kdb_main.c
63431 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63432 list_for_each_entry(mod, kdb_modules, list) {
63433
63434 kdb_printf("%-20s%8u 0x%p ", mod->name,
63435 - mod->core_size, (void *)mod);
63436 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
63437 #ifdef CONFIG_MODULE_UNLOAD
63438 kdb_printf("%4d ", module_refcount(mod));
63439 #endif
63440 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63441 kdb_printf(" (Loading)");
63442 else
63443 kdb_printf(" (Live)");
63444 - kdb_printf(" 0x%p", mod->module_core);
63445 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63446
63447 #ifdef CONFIG_MODULE_UNLOAD
63448 {
63449 diff --git a/kernel/events/core.c b/kernel/events/core.c
63450 index 58690af..d903d75 100644
63451 --- a/kernel/events/core.c
63452 +++ b/kernel/events/core.c
63453 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63454 return 0;
63455 }
63456
63457 -static atomic64_t perf_event_id;
63458 +static atomic64_unchecked_t perf_event_id;
63459
63460 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63461 enum event_type_t event_type);
63462 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63463
63464 static inline u64 perf_event_count(struct perf_event *event)
63465 {
63466 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63467 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63468 }
63469
63470 static u64 perf_event_read(struct perf_event *event)
63471 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63472 mutex_lock(&event->child_mutex);
63473 total += perf_event_read(event);
63474 *enabled += event->total_time_enabled +
63475 - atomic64_read(&event->child_total_time_enabled);
63476 + atomic64_read_unchecked(&event->child_total_time_enabled);
63477 *running += event->total_time_running +
63478 - atomic64_read(&event->child_total_time_running);
63479 + atomic64_read_unchecked(&event->child_total_time_running);
63480
63481 list_for_each_entry(child, &event->child_list, child_list) {
63482 total += perf_event_read(child);
63483 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63484 userpg->offset -= local64_read(&event->hw.prev_count);
63485
63486 userpg->time_enabled = enabled +
63487 - atomic64_read(&event->child_total_time_enabled);
63488 + atomic64_read_unchecked(&event->child_total_time_enabled);
63489
63490 userpg->time_running = running +
63491 - atomic64_read(&event->child_total_time_running);
63492 + atomic64_read_unchecked(&event->child_total_time_running);
63493
63494 barrier();
63495 ++userpg->lock;
63496 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63497 values[n++] = perf_event_count(event);
63498 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63499 values[n++] = enabled +
63500 - atomic64_read(&event->child_total_time_enabled);
63501 + atomic64_read_unchecked(&event->child_total_time_enabled);
63502 }
63503 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63504 values[n++] = running +
63505 - atomic64_read(&event->child_total_time_running);
63506 + atomic64_read_unchecked(&event->child_total_time_running);
63507 }
63508 if (read_format & PERF_FORMAT_ID)
63509 values[n++] = primary_event_id(event);
63510 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63511 * need to add enough zero bytes after the string to handle
63512 * the 64bit alignment we do later.
63513 */
63514 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63515 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63516 if (!buf) {
63517 name = strncpy(tmp, "//enomem", sizeof(tmp));
63518 goto got_name;
63519 }
63520 - name = d_path(&file->f_path, buf, PATH_MAX);
63521 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63522 if (IS_ERR(name)) {
63523 name = strncpy(tmp, "//toolong", sizeof(tmp));
63524 goto got_name;
63525 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63526 event->parent = parent_event;
63527
63528 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63529 - event->id = atomic64_inc_return(&perf_event_id);
63530 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63531
63532 event->state = PERF_EVENT_STATE_INACTIVE;
63533
63534 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63535 /*
63536 * Add back the child's count to the parent's count:
63537 */
63538 - atomic64_add(child_val, &parent_event->child_count);
63539 - atomic64_add(child_event->total_time_enabled,
63540 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63541 + atomic64_add_unchecked(child_event->total_time_enabled,
63542 &parent_event->child_total_time_enabled);
63543 - atomic64_add(child_event->total_time_running,
63544 + atomic64_add_unchecked(child_event->total_time_running,
63545 &parent_event->child_total_time_running);
63546
63547 /*
63548 diff --git a/kernel/exit.c b/kernel/exit.c
63549 index e6e01b9..619f837 100644
63550 --- a/kernel/exit.c
63551 +++ b/kernel/exit.c
63552 @@ -57,6 +57,10 @@
63553 #include <asm/pgtable.h>
63554 #include <asm/mmu_context.h>
63555
63556 +#ifdef CONFIG_GRKERNSEC
63557 +extern rwlock_t grsec_exec_file_lock;
63558 +#endif
63559 +
63560 static void exit_mm(struct task_struct * tsk);
63561
63562 static void __unhash_process(struct task_struct *p, bool group_dead)
63563 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63564 struct task_struct *leader;
63565 int zap_leader;
63566 repeat:
63567 +#ifdef CONFIG_NET
63568 + gr_del_task_from_ip_table(p);
63569 +#endif
63570 +
63571 /* don't need to get the RCU readlock here - the process is dead and
63572 * can't be modifying its own credentials. But shut RCU-lockdep up */
63573 rcu_read_lock();
63574 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63575 * know it'll be handled, so that they don't get converted to
63576 * SIGKILL or just silently dropped.
63577 */
63578 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63579 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63580 recalc_sigpending();
63581 spin_unlock_irq(&current->sighand->siglock);
63582 return 0;
63583 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63584 vsnprintf(current->comm, sizeof(current->comm), name, args);
63585 va_end(args);
63586
63587 +#ifdef CONFIG_GRKERNSEC
63588 + write_lock(&grsec_exec_file_lock);
63589 + if (current->exec_file) {
63590 + fput(current->exec_file);
63591 + current->exec_file = NULL;
63592 + }
63593 + write_unlock(&grsec_exec_file_lock);
63594 +#endif
63595 +
63596 + gr_set_kernel_label(current);
63597 +
63598 /*
63599 * If we were started as result of loading a module, close all of the
63600 * user space pages. We don't need them, and if we didn't close them
63601 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63602 struct task_struct *tsk = current;
63603 int group_dead;
63604
63605 + set_fs(USER_DS);
63606 +
63607 profile_task_exit(tsk);
63608
63609 WARN_ON(blk_needs_flush_plug(tsk));
63610 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63611 * mm_release()->clear_child_tid() from writing to a user-controlled
63612 * kernel address.
63613 */
63614 - set_fs(USER_DS);
63615
63616 ptrace_event(PTRACE_EVENT_EXIT, code);
63617
63618 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63619 tsk->exit_code = code;
63620 taskstats_exit(tsk, group_dead);
63621
63622 + gr_acl_handle_psacct(tsk, code);
63623 + gr_acl_handle_exit();
63624 +
63625 exit_mm(tsk);
63626
63627 if (group_dead)
63628 diff --git a/kernel/fork.c b/kernel/fork.c
63629 index da4a6a1..0973380 100644
63630 --- a/kernel/fork.c
63631 +++ b/kernel/fork.c
63632 @@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63633 *stackend = STACK_END_MAGIC; /* for overflow detection */
63634
63635 #ifdef CONFIG_CC_STACKPROTECTOR
63636 - tsk->stack_canary = get_random_int();
63637 + tsk->stack_canary = pax_get_random_long();
63638 #endif
63639
63640 /*
63641 @@ -304,13 +304,77 @@ out:
63642 }
63643
63644 #ifdef CONFIG_MMU
63645 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63646 +{
63647 + struct vm_area_struct *tmp;
63648 + unsigned long charge;
63649 + struct mempolicy *pol;
63650 + struct file *file;
63651 +
63652 + charge = 0;
63653 + if (mpnt->vm_flags & VM_ACCOUNT) {
63654 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63655 + if (security_vm_enough_memory(len))
63656 + goto fail_nomem;
63657 + charge = len;
63658 + }
63659 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63660 + if (!tmp)
63661 + goto fail_nomem;
63662 + *tmp = *mpnt;
63663 + tmp->vm_mm = mm;
63664 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63665 + pol = mpol_dup(vma_policy(mpnt));
63666 + if (IS_ERR(pol))
63667 + goto fail_nomem_policy;
63668 + vma_set_policy(tmp, pol);
63669 + if (anon_vma_fork(tmp, mpnt))
63670 + goto fail_nomem_anon_vma_fork;
63671 + tmp->vm_flags &= ~VM_LOCKED;
63672 + tmp->vm_next = tmp->vm_prev = NULL;
63673 + tmp->vm_mirror = NULL;
63674 + file = tmp->vm_file;
63675 + if (file) {
63676 + struct inode *inode = file->f_path.dentry->d_inode;
63677 + struct address_space *mapping = file->f_mapping;
63678 +
63679 + get_file(file);
63680 + if (tmp->vm_flags & VM_DENYWRITE)
63681 + atomic_dec(&inode->i_writecount);
63682 + mutex_lock(&mapping->i_mmap_mutex);
63683 + if (tmp->vm_flags & VM_SHARED)
63684 + mapping->i_mmap_writable++;
63685 + flush_dcache_mmap_lock(mapping);
63686 + /* insert tmp into the share list, just after mpnt */
63687 + vma_prio_tree_add(tmp, mpnt);
63688 + flush_dcache_mmap_unlock(mapping);
63689 + mutex_unlock(&mapping->i_mmap_mutex);
63690 + }
63691 +
63692 + /*
63693 + * Clear hugetlb-related page reserves for children. This only
63694 + * affects MAP_PRIVATE mappings. Faults generated by the child
63695 + * are not guaranteed to succeed, even if read-only
63696 + */
63697 + if (is_vm_hugetlb_page(tmp))
63698 + reset_vma_resv_huge_pages(tmp);
63699 +
63700 + return tmp;
63701 +
63702 +fail_nomem_anon_vma_fork:
63703 + mpol_put(pol);
63704 +fail_nomem_policy:
63705 + kmem_cache_free(vm_area_cachep, tmp);
63706 +fail_nomem:
63707 + vm_unacct_memory(charge);
63708 + return NULL;
63709 +}
63710 +
63711 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63712 {
63713 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63714 struct rb_node **rb_link, *rb_parent;
63715 int retval;
63716 - unsigned long charge;
63717 - struct mempolicy *pol;
63718
63719 down_write(&oldmm->mmap_sem);
63720 flush_cache_dup_mm(oldmm);
63721 @@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63722 mm->locked_vm = 0;
63723 mm->mmap = NULL;
63724 mm->mmap_cache = NULL;
63725 - mm->free_area_cache = oldmm->mmap_base;
63726 - mm->cached_hole_size = ~0UL;
63727 + mm->free_area_cache = oldmm->free_area_cache;
63728 + mm->cached_hole_size = oldmm->cached_hole_size;
63729 mm->map_count = 0;
63730 cpumask_clear(mm_cpumask(mm));
63731 mm->mm_rb = RB_ROOT;
63732 @@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63733
63734 prev = NULL;
63735 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63736 - struct file *file;
63737 -
63738 if (mpnt->vm_flags & VM_DONTCOPY) {
63739 long pages = vma_pages(mpnt);
63740 mm->total_vm -= pages;
63741 @@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63742 -pages);
63743 continue;
63744 }
63745 - charge = 0;
63746 - if (mpnt->vm_flags & VM_ACCOUNT) {
63747 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63748 - if (security_vm_enough_memory(len))
63749 - goto fail_nomem;
63750 - charge = len;
63751 + tmp = dup_vma(mm, mpnt);
63752 + if (!tmp) {
63753 + retval = -ENOMEM;
63754 + goto out;
63755 }
63756 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63757 - if (!tmp)
63758 - goto fail_nomem;
63759 - *tmp = *mpnt;
63760 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63761 - pol = mpol_dup(vma_policy(mpnt));
63762 - retval = PTR_ERR(pol);
63763 - if (IS_ERR(pol))
63764 - goto fail_nomem_policy;
63765 - vma_set_policy(tmp, pol);
63766 - tmp->vm_mm = mm;
63767 - if (anon_vma_fork(tmp, mpnt))
63768 - goto fail_nomem_anon_vma_fork;
63769 - tmp->vm_flags &= ~VM_LOCKED;
63770 - tmp->vm_next = tmp->vm_prev = NULL;
63771 - file = tmp->vm_file;
63772 - if (file) {
63773 - struct inode *inode = file->f_path.dentry->d_inode;
63774 - struct address_space *mapping = file->f_mapping;
63775 -
63776 - get_file(file);
63777 - if (tmp->vm_flags & VM_DENYWRITE)
63778 - atomic_dec(&inode->i_writecount);
63779 - mutex_lock(&mapping->i_mmap_mutex);
63780 - if (tmp->vm_flags & VM_SHARED)
63781 - mapping->i_mmap_writable++;
63782 - flush_dcache_mmap_lock(mapping);
63783 - /* insert tmp into the share list, just after mpnt */
63784 - vma_prio_tree_add(tmp, mpnt);
63785 - flush_dcache_mmap_unlock(mapping);
63786 - mutex_unlock(&mapping->i_mmap_mutex);
63787 - }
63788 -
63789 - /*
63790 - * Clear hugetlb-related page reserves for children. This only
63791 - * affects MAP_PRIVATE mappings. Faults generated by the child
63792 - * are not guaranteed to succeed, even if read-only
63793 - */
63794 - if (is_vm_hugetlb_page(tmp))
63795 - reset_vma_resv_huge_pages(tmp);
63796
63797 /*
63798 * Link in the new vma and copy the page table entries.
63799 @@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63800 if (retval)
63801 goto out;
63802 }
63803 +
63804 +#ifdef CONFIG_PAX_SEGMEXEC
63805 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63806 + struct vm_area_struct *mpnt_m;
63807 +
63808 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63809 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63810 +
63811 + if (!mpnt->vm_mirror)
63812 + continue;
63813 +
63814 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63815 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63816 + mpnt->vm_mirror = mpnt_m;
63817 + } else {
63818 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63819 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63820 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63821 + mpnt->vm_mirror->vm_mirror = mpnt;
63822 + }
63823 + }
63824 + BUG_ON(mpnt_m);
63825 + }
63826 +#endif
63827 +
63828 /* a new mm has just been created */
63829 arch_dup_mmap(oldmm, mm);
63830 retval = 0;
63831 @@ -425,14 +470,6 @@ out:
63832 flush_tlb_mm(oldmm);
63833 up_write(&oldmm->mmap_sem);
63834 return retval;
63835 -fail_nomem_anon_vma_fork:
63836 - mpol_put(pol);
63837 -fail_nomem_policy:
63838 - kmem_cache_free(vm_area_cachep, tmp);
63839 -fail_nomem:
63840 - retval = -ENOMEM;
63841 - vm_unacct_memory(charge);
63842 - goto out;
63843 }
63844
63845 static inline int mm_alloc_pgd(struct mm_struct *mm)
63846 @@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63847 }
63848 EXPORT_SYMBOL_GPL(get_task_mm);
63849
63850 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63851 +{
63852 + struct mm_struct *mm;
63853 + int err;
63854 +
63855 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63856 + if (err)
63857 + return ERR_PTR(err);
63858 +
63859 + mm = get_task_mm(task);
63860 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63861 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63862 + mmput(mm);
63863 + mm = ERR_PTR(-EACCES);
63864 + }
63865 + mutex_unlock(&task->signal->cred_guard_mutex);
63866 +
63867 + return mm;
63868 +}
63869 +
63870 /* Please note the differences between mmput and mm_release.
63871 * mmput is called whenever we stop holding onto a mm_struct,
63872 * error success whatever.
63873 @@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63874 spin_unlock(&fs->lock);
63875 return -EAGAIN;
63876 }
63877 - fs->users++;
63878 + atomic_inc(&fs->users);
63879 spin_unlock(&fs->lock);
63880 return 0;
63881 }
63882 tsk->fs = copy_fs_struct(fs);
63883 if (!tsk->fs)
63884 return -ENOMEM;
63885 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63886 return 0;
63887 }
63888
63889 @@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63890 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63891 #endif
63892 retval = -EAGAIN;
63893 +
63894 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63895 +
63896 if (atomic_read(&p->real_cred->user->processes) >=
63897 task_rlimit(p, RLIMIT_NPROC)) {
63898 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63899 @@ -1256,6 +1317,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63900 if (clone_flags & CLONE_THREAD)
63901 p->tgid = current->tgid;
63902
63903 + gr_copy_label(p);
63904 +
63905 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63906 /*
63907 * Clear TID on mm_release()?
63908 @@ -1418,6 +1481,8 @@ bad_fork_cleanup_count:
63909 bad_fork_free:
63910 free_task(p);
63911 fork_out:
63912 + gr_log_forkfail(retval);
63913 +
63914 return ERR_PTR(retval);
63915 }
63916
63917 @@ -1518,6 +1583,8 @@ long do_fork(unsigned long clone_flags,
63918 if (clone_flags & CLONE_PARENT_SETTID)
63919 put_user(nr, parent_tidptr);
63920
63921 + gr_handle_brute_check();
63922 +
63923 if (clone_flags & CLONE_VFORK) {
63924 p->vfork_done = &vfork;
63925 init_completion(&vfork);
63926 @@ -1627,7 +1694,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63927 return 0;
63928
63929 /* don't need lock here; in the worst case we'll do useless copy */
63930 - if (fs->users == 1)
63931 + if (atomic_read(&fs->users) == 1)
63932 return 0;
63933
63934 *new_fsp = copy_fs_struct(fs);
63935 @@ -1716,7 +1783,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63936 fs = current->fs;
63937 spin_lock(&fs->lock);
63938 current->fs = new_fs;
63939 - if (--fs->users)
63940 + gr_set_chroot_entries(current, &current->fs->root);
63941 + if (atomic_dec_return(&fs->users))
63942 new_fs = NULL;
63943 else
63944 new_fs = fs;
63945 diff --git a/kernel/futex.c b/kernel/futex.c
63946 index 1614be2..37abc7e 100644
63947 --- a/kernel/futex.c
63948 +++ b/kernel/futex.c
63949 @@ -54,6 +54,7 @@
63950 #include <linux/mount.h>
63951 #include <linux/pagemap.h>
63952 #include <linux/syscalls.h>
63953 +#include <linux/ptrace.h>
63954 #include <linux/signal.h>
63955 #include <linux/export.h>
63956 #include <linux/magic.h>
63957 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63958 struct page *page, *page_head;
63959 int err, ro = 0;
63960
63961 +#ifdef CONFIG_PAX_SEGMEXEC
63962 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63963 + return -EFAULT;
63964 +#endif
63965 +
63966 /*
63967 * The futex address must be "naturally" aligned.
63968 */
63969 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63970 if (!p)
63971 goto err_unlock;
63972 ret = -EPERM;
63973 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63974 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63975 + goto err_unlock;
63976 +#endif
63977 pcred = __task_cred(p);
63978 /* If victim is in different user_ns, then uids are not
63979 comparable, so we must have CAP_SYS_PTRACE */
63980 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63981 {
63982 u32 curval;
63983 int i;
63984 + mm_segment_t oldfs;
63985
63986 /*
63987 * This will fail and we want it. Some arch implementations do
63988 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63989 * implementation, the non-functional ones will return
63990 * -ENOSYS.
63991 */
63992 + oldfs = get_fs();
63993 + set_fs(USER_DS);
63994 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63995 futex_cmpxchg_enabled = 1;
63996 + set_fs(oldfs);
63997
63998 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63999 plist_head_init(&futex_queues[i].chain);
64000 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
64001 index 5f9e689..582d46d 100644
64002 --- a/kernel/futex_compat.c
64003 +++ b/kernel/futex_compat.c
64004 @@ -10,6 +10,7 @@
64005 #include <linux/compat.h>
64006 #include <linux/nsproxy.h>
64007 #include <linux/futex.h>
64008 +#include <linux/ptrace.h>
64009
64010 #include <asm/uaccess.h>
64011
64012 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64013 {
64014 struct compat_robust_list_head __user *head;
64015 unsigned long ret;
64016 - const struct cred *cred = current_cred(), *pcred;
64017 + const struct cred *cred = current_cred();
64018 + const struct cred *pcred;
64019
64020 if (!futex_cmpxchg_enabled)
64021 return -ENOSYS;
64022 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64023 if (!p)
64024 goto err_unlock;
64025 ret = -EPERM;
64026 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64027 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
64028 + goto err_unlock;
64029 +#endif
64030 pcred = __task_cred(p);
64031 /* If victim is in different user_ns, then uids are not
64032 comparable, so we must have CAP_SYS_PTRACE */
64033 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
64034 index 9b22d03..6295b62 100644
64035 --- a/kernel/gcov/base.c
64036 +++ b/kernel/gcov/base.c
64037 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
64038 }
64039
64040 #ifdef CONFIG_MODULES
64041 -static inline int within(void *addr, void *start, unsigned long size)
64042 -{
64043 - return ((addr >= start) && (addr < start + size));
64044 -}
64045 -
64046 /* Update list and generate events when modules are unloaded. */
64047 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64048 void *data)
64049 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64050 prev = NULL;
64051 /* Remove entries located in module from linked list. */
64052 for (info = gcov_info_head; info; info = info->next) {
64053 - if (within(info, mod->module_core, mod->core_size)) {
64054 + if (within_module_core_rw((unsigned long)info, mod)) {
64055 if (prev)
64056 prev->next = info->next;
64057 else
64058 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
64059 index ae34bf5..4e2f3d0 100644
64060 --- a/kernel/hrtimer.c
64061 +++ b/kernel/hrtimer.c
64062 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
64063 local_irq_restore(flags);
64064 }
64065
64066 -static void run_hrtimer_softirq(struct softirq_action *h)
64067 +static void run_hrtimer_softirq(void)
64068 {
64069 hrtimer_peek_ahead_timers();
64070 }
64071 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
64072 index 66ff710..05a5128 100644
64073 --- a/kernel/jump_label.c
64074 +++ b/kernel/jump_label.c
64075 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
64076
64077 size = (((unsigned long)stop - (unsigned long)start)
64078 / sizeof(struct jump_entry));
64079 + pax_open_kernel();
64080 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64081 + pax_close_kernel();
64082 }
64083
64084 static void jump_label_update(struct jump_label_key *key, int enable);
64085 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
64086 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
64087 struct jump_entry *iter;
64088
64089 + pax_open_kernel();
64090 for (iter = iter_start; iter < iter_stop; iter++) {
64091 if (within_module_init(iter->code, mod))
64092 iter->code = 0;
64093 }
64094 + pax_close_kernel();
64095 }
64096
64097 static int
64098 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
64099 index 079f1d3..a407562 100644
64100 --- a/kernel/kallsyms.c
64101 +++ b/kernel/kallsyms.c
64102 @@ -11,6 +11,9 @@
64103 * Changed the compression method from stem compression to "table lookup"
64104 * compression (see scripts/kallsyms.c for a more complete description)
64105 */
64106 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64107 +#define __INCLUDED_BY_HIDESYM 1
64108 +#endif
64109 #include <linux/kallsyms.h>
64110 #include <linux/module.h>
64111 #include <linux/init.h>
64112 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
64113
64114 static inline int is_kernel_inittext(unsigned long addr)
64115 {
64116 + if (system_state != SYSTEM_BOOTING)
64117 + return 0;
64118 +
64119 if (addr >= (unsigned long)_sinittext
64120 && addr <= (unsigned long)_einittext)
64121 return 1;
64122 return 0;
64123 }
64124
64125 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64126 +#ifdef CONFIG_MODULES
64127 +static inline int is_module_text(unsigned long addr)
64128 +{
64129 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
64130 + return 1;
64131 +
64132 + addr = ktla_ktva(addr);
64133 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
64134 +}
64135 +#else
64136 +static inline int is_module_text(unsigned long addr)
64137 +{
64138 + return 0;
64139 +}
64140 +#endif
64141 +#endif
64142 +
64143 static inline int is_kernel_text(unsigned long addr)
64144 {
64145 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
64146 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
64147
64148 static inline int is_kernel(unsigned long addr)
64149 {
64150 +
64151 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64152 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
64153 + return 1;
64154 +
64155 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
64156 +#else
64157 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
64158 +#endif
64159 +
64160 return 1;
64161 return in_gate_area_no_mm(addr);
64162 }
64163
64164 static int is_ksym_addr(unsigned long addr)
64165 {
64166 +
64167 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64168 + if (is_module_text(addr))
64169 + return 0;
64170 +#endif
64171 +
64172 if (all_var)
64173 return is_kernel(addr);
64174
64175 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
64176
64177 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
64178 {
64179 - iter->name[0] = '\0';
64180 iter->nameoff = get_symbol_offset(new_pos);
64181 iter->pos = new_pos;
64182 }
64183 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
64184 {
64185 struct kallsym_iter *iter = m->private;
64186
64187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64188 + if (current_uid())
64189 + return 0;
64190 +#endif
64191 +
64192 /* Some debugging symbols have no name. Ignore them. */
64193 if (!iter->name[0])
64194 return 0;
64195 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
64196 struct kallsym_iter *iter;
64197 int ret;
64198
64199 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
64200 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
64201 if (!iter)
64202 return -ENOMEM;
64203 reset_iter(iter, 0);
64204 diff --git a/kernel/kexec.c b/kernel/kexec.c
64205 index dc7bc08..4601964 100644
64206 --- a/kernel/kexec.c
64207 +++ b/kernel/kexec.c
64208 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
64209 unsigned long flags)
64210 {
64211 struct compat_kexec_segment in;
64212 - struct kexec_segment out, __user *ksegments;
64213 + struct kexec_segment out;
64214 + struct kexec_segment __user *ksegments;
64215 unsigned long i, result;
64216
64217 /* Don't allow clients that don't understand the native
64218 diff --git a/kernel/kmod.c b/kernel/kmod.c
64219 index a4bea97..7a1ae9a 100644
64220 --- a/kernel/kmod.c
64221 +++ b/kernel/kmod.c
64222 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
64223 * If module auto-loading support is disabled then this function
64224 * becomes a no-operation.
64225 */
64226 -int __request_module(bool wait, const char *fmt, ...)
64227 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
64228 {
64229 - va_list args;
64230 char module_name[MODULE_NAME_LEN];
64231 unsigned int max_modprobes;
64232 int ret;
64233 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
64234 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
64235 static char *envp[] = { "HOME=/",
64236 "TERM=linux",
64237 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
64238 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
64239 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
64240 static int kmod_loop_msg;
64241
64242 - va_start(args, fmt);
64243 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
64244 - va_end(args);
64245 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
64246 if (ret >= MODULE_NAME_LEN)
64247 return -ENAMETOOLONG;
64248
64249 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
64250 if (ret)
64251 return ret;
64252
64253 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64254 + if (!current_uid()) {
64255 + /* hack to workaround consolekit/udisks stupidity */
64256 + read_lock(&tasklist_lock);
64257 + if (!strcmp(current->comm, "mount") &&
64258 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
64259 + read_unlock(&tasklist_lock);
64260 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
64261 + return -EPERM;
64262 + }
64263 + read_unlock(&tasklist_lock);
64264 + }
64265 +#endif
64266 +
64267 /* If modprobe needs a service that is in a module, we get a recursive
64268 * loop. Limit the number of running kmod threads to max_threads/2 or
64269 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64270 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64271 atomic_dec(&kmod_concurrent);
64272 return ret;
64273 }
64274 +
64275 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64276 +{
64277 + va_list args;
64278 + int ret;
64279 +
64280 + va_start(args, fmt);
64281 + ret = ____request_module(wait, module_param, fmt, args);
64282 + va_end(args);
64283 +
64284 + return ret;
64285 +}
64286 +
64287 +int __request_module(bool wait, const char *fmt, ...)
64288 +{
64289 + va_list args;
64290 + int ret;
64291 +
64292 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64293 + if (current_uid()) {
64294 + char module_param[MODULE_NAME_LEN];
64295 +
64296 + memset(module_param, 0, sizeof(module_param));
64297 +
64298 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64299 +
64300 + va_start(args, fmt);
64301 + ret = ____request_module(wait, module_param, fmt, args);
64302 + va_end(args);
64303 +
64304 + return ret;
64305 + }
64306 +#endif
64307 +
64308 + va_start(args, fmt);
64309 + ret = ____request_module(wait, NULL, fmt, args);
64310 + va_end(args);
64311 +
64312 + return ret;
64313 +}
64314 +
64315 EXPORT_SYMBOL(__request_module);
64316 #endif /* CONFIG_MODULES */
64317
64318 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64319 *
64320 * Thus the __user pointer cast is valid here.
64321 */
64322 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
64323 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64324
64325 /*
64326 * If ret is 0, either ____call_usermodehelper failed and the
64327 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64328 index faa39d1..d7ad37e 100644
64329 --- a/kernel/kprobes.c
64330 +++ b/kernel/kprobes.c
64331 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64332 * kernel image and loaded module images reside. This is required
64333 * so x86_64 can correctly handle the %rip-relative fixups.
64334 */
64335 - kip->insns = module_alloc(PAGE_SIZE);
64336 + kip->insns = module_alloc_exec(PAGE_SIZE);
64337 if (!kip->insns) {
64338 kfree(kip);
64339 return NULL;
64340 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64341 */
64342 if (!list_is_singular(&kip->list)) {
64343 list_del(&kip->list);
64344 - module_free(NULL, kip->insns);
64345 + module_free_exec(NULL, kip->insns);
64346 kfree(kip);
64347 }
64348 return 1;
64349 @@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64350 {
64351 int i, err = 0;
64352 unsigned long offset = 0, size = 0;
64353 - char *modname, namebuf[128];
64354 + char *modname, namebuf[KSYM_NAME_LEN];
64355 const char *symbol_name;
64356 void *addr;
64357 struct kprobe_blackpoint *kb;
64358 @@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64359 const char *sym = NULL;
64360 unsigned int i = *(loff_t *) v;
64361 unsigned long offset = 0;
64362 - char *modname, namebuf[128];
64363 + char *modname, namebuf[KSYM_NAME_LEN];
64364
64365 head = &kprobe_table[i];
64366 preempt_disable();
64367 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64368 index b2e08c9..01d8049 100644
64369 --- a/kernel/lockdep.c
64370 +++ b/kernel/lockdep.c
64371 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
64372 end = (unsigned long) &_end,
64373 addr = (unsigned long) obj;
64374
64375 +#ifdef CONFIG_PAX_KERNEXEC
64376 + start = ktla_ktva(start);
64377 +#endif
64378 +
64379 /*
64380 * static variable?
64381 */
64382 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64383 if (!static_obj(lock->key)) {
64384 debug_locks_off();
64385 printk("INFO: trying to register non-static key.\n");
64386 + printk("lock:%pS key:%pS.\n", lock, lock->key);
64387 printk("the code is fine but needs lockdep annotation.\n");
64388 printk("turning off the locking correctness validator.\n");
64389 dump_stack();
64390 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64391 if (!class)
64392 return 0;
64393 }
64394 - atomic_inc((atomic_t *)&class->ops);
64395 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64396 if (very_verbose(class)) {
64397 printk("\nacquire class [%p] %s", class->key, class->name);
64398 if (class->name_version > 1)
64399 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64400 index 91c32a0..b2c71c5 100644
64401 --- a/kernel/lockdep_proc.c
64402 +++ b/kernel/lockdep_proc.c
64403 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64404
64405 static void print_name(struct seq_file *m, struct lock_class *class)
64406 {
64407 - char str[128];
64408 + char str[KSYM_NAME_LEN];
64409 const char *name = class->name;
64410
64411 if (!name) {
64412 diff --git a/kernel/module.c b/kernel/module.c
64413 index 178333c..04e3408 100644
64414 --- a/kernel/module.c
64415 +++ b/kernel/module.c
64416 @@ -58,6 +58,7 @@
64417 #include <linux/jump_label.h>
64418 #include <linux/pfn.h>
64419 #include <linux/bsearch.h>
64420 +#include <linux/grsecurity.h>
64421
64422 #define CREATE_TRACE_POINTS
64423 #include <trace/events/module.h>
64424 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64425
64426 /* Bounds of module allocation, for speeding __module_address.
64427 * Protected by module_mutex. */
64428 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64429 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64430 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64431
64432 int register_module_notifier(struct notifier_block * nb)
64433 {
64434 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64435 return true;
64436
64437 list_for_each_entry_rcu(mod, &modules, list) {
64438 - struct symsearch arr[] = {
64439 + struct symsearch modarr[] = {
64440 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64441 NOT_GPL_ONLY, false },
64442 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64443 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64444 #endif
64445 };
64446
64447 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64448 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64449 return true;
64450 }
64451 return false;
64452 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64453 static int percpu_modalloc(struct module *mod,
64454 unsigned long size, unsigned long align)
64455 {
64456 - if (align > PAGE_SIZE) {
64457 + if (align-1 >= PAGE_SIZE) {
64458 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64459 mod->name, align, PAGE_SIZE);
64460 align = PAGE_SIZE;
64461 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64462 */
64463 #ifdef CONFIG_SYSFS
64464
64465 -#ifdef CONFIG_KALLSYMS
64466 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64467 static inline bool sect_empty(const Elf_Shdr *sect)
64468 {
64469 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64470 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64471
64472 static void unset_module_core_ro_nx(struct module *mod)
64473 {
64474 - set_page_attributes(mod->module_core + mod->core_text_size,
64475 - mod->module_core + mod->core_size,
64476 + set_page_attributes(mod->module_core_rw,
64477 + mod->module_core_rw + mod->core_size_rw,
64478 set_memory_x);
64479 - set_page_attributes(mod->module_core,
64480 - mod->module_core + mod->core_ro_size,
64481 + set_page_attributes(mod->module_core_rx,
64482 + mod->module_core_rx + mod->core_size_rx,
64483 set_memory_rw);
64484 }
64485
64486 static void unset_module_init_ro_nx(struct module *mod)
64487 {
64488 - set_page_attributes(mod->module_init + mod->init_text_size,
64489 - mod->module_init + mod->init_size,
64490 + set_page_attributes(mod->module_init_rw,
64491 + mod->module_init_rw + mod->init_size_rw,
64492 set_memory_x);
64493 - set_page_attributes(mod->module_init,
64494 - mod->module_init + mod->init_ro_size,
64495 + set_page_attributes(mod->module_init_rx,
64496 + mod->module_init_rx + mod->init_size_rx,
64497 set_memory_rw);
64498 }
64499
64500 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64501
64502 mutex_lock(&module_mutex);
64503 list_for_each_entry_rcu(mod, &modules, list) {
64504 - if ((mod->module_core) && (mod->core_text_size)) {
64505 - set_page_attributes(mod->module_core,
64506 - mod->module_core + mod->core_text_size,
64507 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64508 + set_page_attributes(mod->module_core_rx,
64509 + mod->module_core_rx + mod->core_size_rx,
64510 set_memory_rw);
64511 }
64512 - if ((mod->module_init) && (mod->init_text_size)) {
64513 - set_page_attributes(mod->module_init,
64514 - mod->module_init + mod->init_text_size,
64515 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64516 + set_page_attributes(mod->module_init_rx,
64517 + mod->module_init_rx + mod->init_size_rx,
64518 set_memory_rw);
64519 }
64520 }
64521 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64522
64523 mutex_lock(&module_mutex);
64524 list_for_each_entry_rcu(mod, &modules, list) {
64525 - if ((mod->module_core) && (mod->core_text_size)) {
64526 - set_page_attributes(mod->module_core,
64527 - mod->module_core + mod->core_text_size,
64528 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64529 + set_page_attributes(mod->module_core_rx,
64530 + mod->module_core_rx + mod->core_size_rx,
64531 set_memory_ro);
64532 }
64533 - if ((mod->module_init) && (mod->init_text_size)) {
64534 - set_page_attributes(mod->module_init,
64535 - mod->module_init + mod->init_text_size,
64536 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64537 + set_page_attributes(mod->module_init_rx,
64538 + mod->module_init_rx + mod->init_size_rx,
64539 set_memory_ro);
64540 }
64541 }
64542 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64543
64544 /* This may be NULL, but that's OK */
64545 unset_module_init_ro_nx(mod);
64546 - module_free(mod, mod->module_init);
64547 + module_free(mod, mod->module_init_rw);
64548 + module_free_exec(mod, mod->module_init_rx);
64549 kfree(mod->args);
64550 percpu_modfree(mod);
64551
64552 /* Free lock-classes: */
64553 - lockdep_free_key_range(mod->module_core, mod->core_size);
64554 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64555 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64556
64557 /* Finally, free the core (containing the module structure) */
64558 unset_module_core_ro_nx(mod);
64559 - module_free(mod, mod->module_core);
64560 + module_free_exec(mod, mod->module_core_rx);
64561 + module_free(mod, mod->module_core_rw);
64562
64563 #ifdef CONFIG_MPU
64564 update_protections(current->mm);
64565 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64566 unsigned int i;
64567 int ret = 0;
64568 const struct kernel_symbol *ksym;
64569 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64570 + int is_fs_load = 0;
64571 + int register_filesystem_found = 0;
64572 + char *p;
64573 +
64574 + p = strstr(mod->args, "grsec_modharden_fs");
64575 + if (p) {
64576 + char *endptr = p + strlen("grsec_modharden_fs");
64577 + /* copy \0 as well */
64578 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64579 + is_fs_load = 1;
64580 + }
64581 +#endif
64582
64583 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64584 const char *name = info->strtab + sym[i].st_name;
64585
64586 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64587 + /* it's a real shame this will never get ripped and copied
64588 + upstream! ;(
64589 + */
64590 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64591 + register_filesystem_found = 1;
64592 +#endif
64593 +
64594 switch (sym[i].st_shndx) {
64595 case SHN_COMMON:
64596 /* We compiled with -fno-common. These are not
64597 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64598 ksym = resolve_symbol_wait(mod, info, name);
64599 /* Ok if resolved. */
64600 if (ksym && !IS_ERR(ksym)) {
64601 + pax_open_kernel();
64602 sym[i].st_value = ksym->value;
64603 + pax_close_kernel();
64604 break;
64605 }
64606
64607 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64608 secbase = (unsigned long)mod_percpu(mod);
64609 else
64610 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64611 + pax_open_kernel();
64612 sym[i].st_value += secbase;
64613 + pax_close_kernel();
64614 break;
64615 }
64616 }
64617
64618 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64619 + if (is_fs_load && !register_filesystem_found) {
64620 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64621 + ret = -EPERM;
64622 + }
64623 +#endif
64624 +
64625 return ret;
64626 }
64627
64628 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64629 || s->sh_entsize != ~0UL
64630 || strstarts(sname, ".init"))
64631 continue;
64632 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64633 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64634 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64635 + else
64636 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64637 DEBUGP("\t%s\n", name);
64638 }
64639 - switch (m) {
64640 - case 0: /* executable */
64641 - mod->core_size = debug_align(mod->core_size);
64642 - mod->core_text_size = mod->core_size;
64643 - break;
64644 - case 1: /* RO: text and ro-data */
64645 - mod->core_size = debug_align(mod->core_size);
64646 - mod->core_ro_size = mod->core_size;
64647 - break;
64648 - case 3: /* whole core */
64649 - mod->core_size = debug_align(mod->core_size);
64650 - break;
64651 - }
64652 }
64653
64654 DEBUGP("Init section allocation order:\n");
64655 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64656 || s->sh_entsize != ~0UL
64657 || !strstarts(sname, ".init"))
64658 continue;
64659 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64660 - | INIT_OFFSET_MASK);
64661 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64662 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64663 + else
64664 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64665 + s->sh_entsize |= INIT_OFFSET_MASK;
64666 DEBUGP("\t%s\n", sname);
64667 }
64668 - switch (m) {
64669 - case 0: /* executable */
64670 - mod->init_size = debug_align(mod->init_size);
64671 - mod->init_text_size = mod->init_size;
64672 - break;
64673 - case 1: /* RO: text and ro-data */
64674 - mod->init_size = debug_align(mod->init_size);
64675 - mod->init_ro_size = mod->init_size;
64676 - break;
64677 - case 3: /* whole init */
64678 - mod->init_size = debug_align(mod->init_size);
64679 - break;
64680 - }
64681 }
64682 }
64683
64684 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64685
64686 /* Put symbol section at end of init part of module. */
64687 symsect->sh_flags |= SHF_ALLOC;
64688 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64689 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64690 info->index.sym) | INIT_OFFSET_MASK;
64691 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64692
64693 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64694 }
64695
64696 /* Append room for core symbols at end of core part. */
64697 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64698 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64699 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64700 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64701
64702 /* Put string table section at end of init part of module. */
64703 strsect->sh_flags |= SHF_ALLOC;
64704 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64705 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64706 info->index.str) | INIT_OFFSET_MASK;
64707 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64708
64709 /* Append room for core symbols' strings at end of core part. */
64710 - info->stroffs = mod->core_size;
64711 + info->stroffs = mod->core_size_rx;
64712 __set_bit(0, info->strmap);
64713 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64714 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64715 }
64716
64717 static void add_kallsyms(struct module *mod, const struct load_info *info)
64718 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64719 /* Make sure we get permanent strtab: don't use info->strtab. */
64720 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64721
64722 + pax_open_kernel();
64723 +
64724 /* Set types up while we still have access to sections. */
64725 for (i = 0; i < mod->num_symtab; i++)
64726 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64727
64728 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64729 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64730 src = mod->symtab;
64731 *dst = *src;
64732 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64733 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64734 }
64735 mod->core_num_syms = ndst;
64736
64737 - mod->core_strtab = s = mod->module_core + info->stroffs;
64738 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64739 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64740 if (test_bit(i, info->strmap))
64741 *++s = mod->strtab[i];
64742 +
64743 + pax_close_kernel();
64744 }
64745 #else
64746 static inline void layout_symtab(struct module *mod, struct load_info *info)
64747 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64748 return size == 0 ? NULL : vmalloc_exec(size);
64749 }
64750
64751 -static void *module_alloc_update_bounds(unsigned long size)
64752 +static void *module_alloc_update_bounds_rw(unsigned long size)
64753 {
64754 void *ret = module_alloc(size);
64755
64756 if (ret) {
64757 mutex_lock(&module_mutex);
64758 /* Update module bounds. */
64759 - if ((unsigned long)ret < module_addr_min)
64760 - module_addr_min = (unsigned long)ret;
64761 - if ((unsigned long)ret + size > module_addr_max)
64762 - module_addr_max = (unsigned long)ret + size;
64763 + if ((unsigned long)ret < module_addr_min_rw)
64764 + module_addr_min_rw = (unsigned long)ret;
64765 + if ((unsigned long)ret + size > module_addr_max_rw)
64766 + module_addr_max_rw = (unsigned long)ret + size;
64767 + mutex_unlock(&module_mutex);
64768 + }
64769 + return ret;
64770 +}
64771 +
64772 +static void *module_alloc_update_bounds_rx(unsigned long size)
64773 +{
64774 + void *ret = module_alloc_exec(size);
64775 +
64776 + if (ret) {
64777 + mutex_lock(&module_mutex);
64778 + /* Update module bounds. */
64779 + if ((unsigned long)ret < module_addr_min_rx)
64780 + module_addr_min_rx = (unsigned long)ret;
64781 + if ((unsigned long)ret + size > module_addr_max_rx)
64782 + module_addr_max_rx = (unsigned long)ret + size;
64783 mutex_unlock(&module_mutex);
64784 }
64785 return ret;
64786 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64787 static int check_modinfo(struct module *mod, struct load_info *info)
64788 {
64789 const char *modmagic = get_modinfo(info, "vermagic");
64790 + const char *license = get_modinfo(info, "license");
64791 int err;
64792
64793 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64794 + if (!license || !license_is_gpl_compatible(license))
64795 + return -ENOEXEC;
64796 +#endif
64797 +
64798 /* This is allowed: modprobe --force will invalidate it. */
64799 if (!modmagic) {
64800 err = try_to_force_load(mod, "bad vermagic");
64801 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64802 }
64803
64804 /* Set up license info based on the info section */
64805 - set_license(mod, get_modinfo(info, "license"));
64806 + set_license(mod, license);
64807
64808 return 0;
64809 }
64810 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64811 void *ptr;
64812
64813 /* Do the allocs. */
64814 - ptr = module_alloc_update_bounds(mod->core_size);
64815 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64816 /*
64817 * The pointer to this block is stored in the module structure
64818 * which is inside the block. Just mark it as not being a
64819 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64820 if (!ptr)
64821 return -ENOMEM;
64822
64823 - memset(ptr, 0, mod->core_size);
64824 - mod->module_core = ptr;
64825 + memset(ptr, 0, mod->core_size_rw);
64826 + mod->module_core_rw = ptr;
64827
64828 - ptr = module_alloc_update_bounds(mod->init_size);
64829 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64830 /*
64831 * The pointer to this block is stored in the module structure
64832 * which is inside the block. This block doesn't need to be
64833 * scanned as it contains data and code that will be freed
64834 * after the module is initialized.
64835 */
64836 - kmemleak_ignore(ptr);
64837 - if (!ptr && mod->init_size) {
64838 - module_free(mod, mod->module_core);
64839 + kmemleak_not_leak(ptr);
64840 + if (!ptr && mod->init_size_rw) {
64841 + module_free(mod, mod->module_core_rw);
64842 return -ENOMEM;
64843 }
64844 - memset(ptr, 0, mod->init_size);
64845 - mod->module_init = ptr;
64846 + memset(ptr, 0, mod->init_size_rw);
64847 + mod->module_init_rw = ptr;
64848 +
64849 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64850 + kmemleak_not_leak(ptr);
64851 + if (!ptr) {
64852 + module_free(mod, mod->module_init_rw);
64853 + module_free(mod, mod->module_core_rw);
64854 + return -ENOMEM;
64855 + }
64856 +
64857 + pax_open_kernel();
64858 + memset(ptr, 0, mod->core_size_rx);
64859 + pax_close_kernel();
64860 + mod->module_core_rx = ptr;
64861 +
64862 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64863 + kmemleak_not_leak(ptr);
64864 + if (!ptr && mod->init_size_rx) {
64865 + module_free_exec(mod, mod->module_core_rx);
64866 + module_free(mod, mod->module_init_rw);
64867 + module_free(mod, mod->module_core_rw);
64868 + return -ENOMEM;
64869 + }
64870 +
64871 + pax_open_kernel();
64872 + memset(ptr, 0, mod->init_size_rx);
64873 + pax_close_kernel();
64874 + mod->module_init_rx = ptr;
64875
64876 /* Transfer each section which specifies SHF_ALLOC */
64877 DEBUGP("final section addresses:\n");
64878 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64879 if (!(shdr->sh_flags & SHF_ALLOC))
64880 continue;
64881
64882 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64883 - dest = mod->module_init
64884 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64885 - else
64886 - dest = mod->module_core + shdr->sh_entsize;
64887 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64888 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64889 + dest = mod->module_init_rw
64890 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64891 + else
64892 + dest = mod->module_init_rx
64893 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64894 + } else {
64895 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64896 + dest = mod->module_core_rw + shdr->sh_entsize;
64897 + else
64898 + dest = mod->module_core_rx + shdr->sh_entsize;
64899 + }
64900 +
64901 + if (shdr->sh_type != SHT_NOBITS) {
64902 +
64903 +#ifdef CONFIG_PAX_KERNEXEC
64904 +#ifdef CONFIG_X86_64
64905 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64906 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64907 +#endif
64908 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64909 + pax_open_kernel();
64910 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64911 + pax_close_kernel();
64912 + } else
64913 +#endif
64914
64915 - if (shdr->sh_type != SHT_NOBITS)
64916 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64917 + }
64918 /* Update sh_addr to point to copy in image. */
64919 - shdr->sh_addr = (unsigned long)dest;
64920 +
64921 +#ifdef CONFIG_PAX_KERNEXEC
64922 + if (shdr->sh_flags & SHF_EXECINSTR)
64923 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64924 + else
64925 +#endif
64926 +
64927 + shdr->sh_addr = (unsigned long)dest;
64928 DEBUGP("\t0x%lx %s\n",
64929 shdr->sh_addr, info->secstrings + shdr->sh_name);
64930 }
64931 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64932 * Do it before processing of module parameters, so the module
64933 * can provide parameter accessor functions of its own.
64934 */
64935 - if (mod->module_init)
64936 - flush_icache_range((unsigned long)mod->module_init,
64937 - (unsigned long)mod->module_init
64938 - + mod->init_size);
64939 - flush_icache_range((unsigned long)mod->module_core,
64940 - (unsigned long)mod->module_core + mod->core_size);
64941 + if (mod->module_init_rx)
64942 + flush_icache_range((unsigned long)mod->module_init_rx,
64943 + (unsigned long)mod->module_init_rx
64944 + + mod->init_size_rx);
64945 + flush_icache_range((unsigned long)mod->module_core_rx,
64946 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64947
64948 set_fs(old_fs);
64949 }
64950 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64951 {
64952 kfree(info->strmap);
64953 percpu_modfree(mod);
64954 - module_free(mod, mod->module_init);
64955 - module_free(mod, mod->module_core);
64956 + module_free_exec(mod, mod->module_init_rx);
64957 + module_free_exec(mod, mod->module_core_rx);
64958 + module_free(mod, mod->module_init_rw);
64959 + module_free(mod, mod->module_core_rw);
64960 }
64961
64962 int __weak module_finalize(const Elf_Ehdr *hdr,
64963 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64964 if (err)
64965 goto free_unload;
64966
64967 + /* Now copy in args */
64968 + mod->args = strndup_user(uargs, ~0UL >> 1);
64969 + if (IS_ERR(mod->args)) {
64970 + err = PTR_ERR(mod->args);
64971 + goto free_unload;
64972 + }
64973 +
64974 /* Set up MODINFO_ATTR fields */
64975 setup_modinfo(mod, &info);
64976
64977 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64978 + {
64979 + char *p, *p2;
64980 +
64981 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64982 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64983 + err = -EPERM;
64984 + goto free_modinfo;
64985 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64986 + p += strlen("grsec_modharden_normal");
64987 + p2 = strstr(p, "_");
64988 + if (p2) {
64989 + *p2 = '\0';
64990 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64991 + *p2 = '_';
64992 + }
64993 + err = -EPERM;
64994 + goto free_modinfo;
64995 + }
64996 + }
64997 +#endif
64998 +
64999 /* Fix up syms, so that st_value is a pointer to location. */
65000 err = simplify_symbols(mod, &info);
65001 if (err < 0)
65002 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
65003
65004 flush_module_icache(mod);
65005
65006 - /* Now copy in args */
65007 - mod->args = strndup_user(uargs, ~0UL >> 1);
65008 - if (IS_ERR(mod->args)) {
65009 - err = PTR_ERR(mod->args);
65010 - goto free_arch_cleanup;
65011 - }
65012 -
65013 /* Mark state as coming so strong_try_module_get() ignores us. */
65014 mod->state = MODULE_STATE_COMING;
65015
65016 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
65017 unlock:
65018 mutex_unlock(&module_mutex);
65019 synchronize_sched();
65020 - kfree(mod->args);
65021 - free_arch_cleanup:
65022 module_arch_cleanup(mod);
65023 free_modinfo:
65024 free_modinfo(mod);
65025 + kfree(mod->args);
65026 free_unload:
65027 module_unload_free(mod);
65028 free_module:
65029 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65030 MODULE_STATE_COMING, mod);
65031
65032 /* Set RO and NX regions for core */
65033 - set_section_ro_nx(mod->module_core,
65034 - mod->core_text_size,
65035 - mod->core_ro_size,
65036 - mod->core_size);
65037 + set_section_ro_nx(mod->module_core_rx,
65038 + mod->core_size_rx,
65039 + mod->core_size_rx,
65040 + mod->core_size_rx);
65041
65042 /* Set RO and NX regions for init */
65043 - set_section_ro_nx(mod->module_init,
65044 - mod->init_text_size,
65045 - mod->init_ro_size,
65046 - mod->init_size);
65047 + set_section_ro_nx(mod->module_init_rx,
65048 + mod->init_size_rx,
65049 + mod->init_size_rx,
65050 + mod->init_size_rx);
65051
65052 do_mod_ctors(mod);
65053 /* Start the module */
65054 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65055 mod->strtab = mod->core_strtab;
65056 #endif
65057 unset_module_init_ro_nx(mod);
65058 - module_free(mod, mod->module_init);
65059 - mod->module_init = NULL;
65060 - mod->init_size = 0;
65061 - mod->init_ro_size = 0;
65062 - mod->init_text_size = 0;
65063 + module_free(mod, mod->module_init_rw);
65064 + module_free_exec(mod, mod->module_init_rx);
65065 + mod->module_init_rw = NULL;
65066 + mod->module_init_rx = NULL;
65067 + mod->init_size_rw = 0;
65068 + mod->init_size_rx = 0;
65069 mutex_unlock(&module_mutex);
65070
65071 return 0;
65072 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
65073 unsigned long nextval;
65074
65075 /* At worse, next value is at end of module */
65076 - if (within_module_init(addr, mod))
65077 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
65078 + if (within_module_init_rx(addr, mod))
65079 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
65080 + else if (within_module_init_rw(addr, mod))
65081 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
65082 + else if (within_module_core_rx(addr, mod))
65083 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
65084 + else if (within_module_core_rw(addr, mod))
65085 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
65086 else
65087 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
65088 + return NULL;
65089
65090 /* Scan for closest preceding symbol, and next symbol. (ELF
65091 starts real symbols at 1). */
65092 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
65093 char buf[8];
65094
65095 seq_printf(m, "%s %u",
65096 - mod->name, mod->init_size + mod->core_size);
65097 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
65098 print_unload_info(m, mod);
65099
65100 /* Informative for users. */
65101 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
65102 mod->state == MODULE_STATE_COMING ? "Loading":
65103 "Live");
65104 /* Used by oprofile and other similar tools. */
65105 - seq_printf(m, " 0x%pK", mod->module_core);
65106 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
65107
65108 /* Taints info */
65109 if (mod->taints)
65110 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
65111
65112 static int __init proc_modules_init(void)
65113 {
65114 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65115 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65116 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65117 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65118 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
65119 +#else
65120 proc_create("modules", 0, NULL, &proc_modules_operations);
65121 +#endif
65122 +#else
65123 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65124 +#endif
65125 return 0;
65126 }
65127 module_init(proc_modules_init);
65128 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
65129 {
65130 struct module *mod;
65131
65132 - if (addr < module_addr_min || addr > module_addr_max)
65133 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
65134 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
65135 return NULL;
65136
65137 list_for_each_entry_rcu(mod, &modules, list)
65138 - if (within_module_core(addr, mod)
65139 - || within_module_init(addr, mod))
65140 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
65141 return mod;
65142 return NULL;
65143 }
65144 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
65145 */
65146 struct module *__module_text_address(unsigned long addr)
65147 {
65148 - struct module *mod = __module_address(addr);
65149 + struct module *mod;
65150 +
65151 +#ifdef CONFIG_X86_32
65152 + addr = ktla_ktva(addr);
65153 +#endif
65154 +
65155 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
65156 + return NULL;
65157 +
65158 + mod = __module_address(addr);
65159 +
65160 if (mod) {
65161 /* Make sure it's within the text section. */
65162 - if (!within(addr, mod->module_init, mod->init_text_size)
65163 - && !within(addr, mod->module_core, mod->core_text_size))
65164 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
65165 mod = NULL;
65166 }
65167 return mod;
65168 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
65169 index 7e3443f..b2a1e6b 100644
65170 --- a/kernel/mutex-debug.c
65171 +++ b/kernel/mutex-debug.c
65172 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
65173 }
65174
65175 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65176 - struct thread_info *ti)
65177 + struct task_struct *task)
65178 {
65179 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
65180
65181 /* Mark the current thread as blocked on the lock: */
65182 - ti->task->blocked_on = waiter;
65183 + task->blocked_on = waiter;
65184 }
65185
65186 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65187 - struct thread_info *ti)
65188 + struct task_struct *task)
65189 {
65190 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
65191 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
65192 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
65193 - ti->task->blocked_on = NULL;
65194 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
65195 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
65196 + task->blocked_on = NULL;
65197
65198 list_del_init(&waiter->list);
65199 waiter->task = NULL;
65200 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
65201 index 0799fd3..d06ae3b 100644
65202 --- a/kernel/mutex-debug.h
65203 +++ b/kernel/mutex-debug.h
65204 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
65205 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
65206 extern void debug_mutex_add_waiter(struct mutex *lock,
65207 struct mutex_waiter *waiter,
65208 - struct thread_info *ti);
65209 + struct task_struct *task);
65210 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65211 - struct thread_info *ti);
65212 + struct task_struct *task);
65213 extern void debug_mutex_unlock(struct mutex *lock);
65214 extern void debug_mutex_init(struct mutex *lock, const char *name,
65215 struct lock_class_key *key);
65216 diff --git a/kernel/mutex.c b/kernel/mutex.c
65217 index 89096dd..f91ebc5 100644
65218 --- a/kernel/mutex.c
65219 +++ b/kernel/mutex.c
65220 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65221 spin_lock_mutex(&lock->wait_lock, flags);
65222
65223 debug_mutex_lock_common(lock, &waiter);
65224 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
65225 + debug_mutex_add_waiter(lock, &waiter, task);
65226
65227 /* add waiting tasks to the end of the waitqueue (FIFO): */
65228 list_add_tail(&waiter.list, &lock->wait_list);
65229 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65230 * TASK_UNINTERRUPTIBLE case.)
65231 */
65232 if (unlikely(signal_pending_state(state, task))) {
65233 - mutex_remove_waiter(lock, &waiter,
65234 - task_thread_info(task));
65235 + mutex_remove_waiter(lock, &waiter, task);
65236 mutex_release(&lock->dep_map, 1, ip);
65237 spin_unlock_mutex(&lock->wait_lock, flags);
65238
65239 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65240 done:
65241 lock_acquired(&lock->dep_map, ip);
65242 /* got the lock - rejoice! */
65243 - mutex_remove_waiter(lock, &waiter, current_thread_info());
65244 + mutex_remove_waiter(lock, &waiter, task);
65245 mutex_set_owner(lock);
65246
65247 /* set it to 0 if there are no waiters left: */
65248 diff --git a/kernel/padata.c b/kernel/padata.c
65249 index b452599..5d68f4e 100644
65250 --- a/kernel/padata.c
65251 +++ b/kernel/padata.c
65252 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
65253 padata->pd = pd;
65254 padata->cb_cpu = cb_cpu;
65255
65256 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
65257 - atomic_set(&pd->seq_nr, -1);
65258 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
65259 + atomic_set_unchecked(&pd->seq_nr, -1);
65260
65261 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65262 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65263
65264 target_cpu = padata_cpu_hash(padata);
65265 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65266 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65267 padata_init_pqueues(pd);
65268 padata_init_squeues(pd);
65269 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65270 - atomic_set(&pd->seq_nr, -1);
65271 + atomic_set_unchecked(&pd->seq_nr, -1);
65272 atomic_set(&pd->reorder_objects, 0);
65273 atomic_set(&pd->refcnt, 0);
65274 pd->pinst = pinst;
65275 diff --git a/kernel/panic.c b/kernel/panic.c
65276 index 3458469..342c500 100644
65277 --- a/kernel/panic.c
65278 +++ b/kernel/panic.c
65279 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65280 va_end(args);
65281 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65282 #ifdef CONFIG_DEBUG_BUGVERBOSE
65283 - dump_stack();
65284 + /*
65285 + * Avoid nested stack-dumping if a panic occurs during oops processing
65286 + */
65287 + if (!oops_in_progress)
65288 + dump_stack();
65289 #endif
65290
65291 /*
65292 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65293 const char *board;
65294
65295 printk(KERN_WARNING "------------[ cut here ]------------\n");
65296 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65297 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65298 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65299 if (board)
65300 printk(KERN_WARNING "Hardware name: %s\n", board);
65301 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65302 */
65303 void __stack_chk_fail(void)
65304 {
65305 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
65306 + dump_stack();
65307 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65308 __builtin_return_address(0));
65309 }
65310 EXPORT_SYMBOL(__stack_chk_fail);
65311 diff --git a/kernel/pid.c b/kernel/pid.c
65312 index fa5f722..0c93e57 100644
65313 --- a/kernel/pid.c
65314 +++ b/kernel/pid.c
65315 @@ -33,6 +33,7 @@
65316 #include <linux/rculist.h>
65317 #include <linux/bootmem.h>
65318 #include <linux/hash.h>
65319 +#include <linux/security.h>
65320 #include <linux/pid_namespace.h>
65321 #include <linux/init_task.h>
65322 #include <linux/syscalls.h>
65323 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65324
65325 int pid_max = PID_MAX_DEFAULT;
65326
65327 -#define RESERVED_PIDS 300
65328 +#define RESERVED_PIDS 500
65329
65330 int pid_max_min = RESERVED_PIDS + 1;
65331 int pid_max_max = PID_MAX_LIMIT;
65332 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65333 */
65334 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65335 {
65336 + struct task_struct *task;
65337 +
65338 rcu_lockdep_assert(rcu_read_lock_held(),
65339 "find_task_by_pid_ns() needs rcu_read_lock()"
65340 " protection");
65341 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65342 +
65343 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65344 +
65345 + if (gr_pid_is_chrooted(task))
65346 + return NULL;
65347 +
65348 + return task;
65349 }
65350
65351 struct task_struct *find_task_by_vpid(pid_t vnr)
65352 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65353 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65354 }
65355
65356 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65357 +{
65358 + rcu_lockdep_assert(rcu_read_lock_held(),
65359 + "find_task_by_pid_ns() needs rcu_read_lock()"
65360 + " protection");
65361 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65362 +}
65363 +
65364 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65365 {
65366 struct pid *pid;
65367 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65368 index e7cb76d..75eceb3 100644
65369 --- a/kernel/posix-cpu-timers.c
65370 +++ b/kernel/posix-cpu-timers.c
65371 @@ -6,6 +6,7 @@
65372 #include <linux/posix-timers.h>
65373 #include <linux/errno.h>
65374 #include <linux/math64.h>
65375 +#include <linux/security.h>
65376 #include <asm/uaccess.h>
65377 #include <linux/kernel_stat.h>
65378 #include <trace/events/timer.h>
65379 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65380
65381 static __init int init_posix_cpu_timers(void)
65382 {
65383 - struct k_clock process = {
65384 + static struct k_clock process = {
65385 .clock_getres = process_cpu_clock_getres,
65386 .clock_get = process_cpu_clock_get,
65387 .timer_create = process_cpu_timer_create,
65388 .nsleep = process_cpu_nsleep,
65389 .nsleep_restart = process_cpu_nsleep_restart,
65390 };
65391 - struct k_clock thread = {
65392 + static struct k_clock thread = {
65393 .clock_getres = thread_cpu_clock_getres,
65394 .clock_get = thread_cpu_clock_get,
65395 .timer_create = thread_cpu_timer_create,
65396 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65397 index 69185ae..cc2847a 100644
65398 --- a/kernel/posix-timers.c
65399 +++ b/kernel/posix-timers.c
65400 @@ -43,6 +43,7 @@
65401 #include <linux/idr.h>
65402 #include <linux/posix-clock.h>
65403 #include <linux/posix-timers.h>
65404 +#include <linux/grsecurity.h>
65405 #include <linux/syscalls.h>
65406 #include <linux/wait.h>
65407 #include <linux/workqueue.h>
65408 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65409 * which we beg off on and pass to do_sys_settimeofday().
65410 */
65411
65412 -static struct k_clock posix_clocks[MAX_CLOCKS];
65413 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65414
65415 /*
65416 * These ones are defined below.
65417 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65418 */
65419 static __init int init_posix_timers(void)
65420 {
65421 - struct k_clock clock_realtime = {
65422 + static struct k_clock clock_realtime = {
65423 .clock_getres = hrtimer_get_res,
65424 .clock_get = posix_clock_realtime_get,
65425 .clock_set = posix_clock_realtime_set,
65426 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65427 .timer_get = common_timer_get,
65428 .timer_del = common_timer_del,
65429 };
65430 - struct k_clock clock_monotonic = {
65431 + static struct k_clock clock_monotonic = {
65432 .clock_getres = hrtimer_get_res,
65433 .clock_get = posix_ktime_get_ts,
65434 .nsleep = common_nsleep,
65435 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65436 .timer_get = common_timer_get,
65437 .timer_del = common_timer_del,
65438 };
65439 - struct k_clock clock_monotonic_raw = {
65440 + static struct k_clock clock_monotonic_raw = {
65441 .clock_getres = hrtimer_get_res,
65442 .clock_get = posix_get_monotonic_raw,
65443 };
65444 - struct k_clock clock_realtime_coarse = {
65445 + static struct k_clock clock_realtime_coarse = {
65446 .clock_getres = posix_get_coarse_res,
65447 .clock_get = posix_get_realtime_coarse,
65448 };
65449 - struct k_clock clock_monotonic_coarse = {
65450 + static struct k_clock clock_monotonic_coarse = {
65451 .clock_getres = posix_get_coarse_res,
65452 .clock_get = posix_get_monotonic_coarse,
65453 };
65454 - struct k_clock clock_boottime = {
65455 + static struct k_clock clock_boottime = {
65456 .clock_getres = hrtimer_get_res,
65457 .clock_get = posix_get_boottime,
65458 .nsleep = common_nsleep,
65459 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65460 return;
65461 }
65462
65463 - posix_clocks[clock_id] = *new_clock;
65464 + posix_clocks[clock_id] = new_clock;
65465 }
65466 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65467
65468 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65469 return (id & CLOCKFD_MASK) == CLOCKFD ?
65470 &clock_posix_dynamic : &clock_posix_cpu;
65471
65472 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65473 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65474 return NULL;
65475 - return &posix_clocks[id];
65476 + return posix_clocks[id];
65477 }
65478
65479 static int common_timer_create(struct k_itimer *new_timer)
65480 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65481 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65482 return -EFAULT;
65483
65484 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65485 + have their clock_set fptr set to a nosettime dummy function
65486 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65487 + call common_clock_set, which calls do_sys_settimeofday, which
65488 + we hook
65489 + */
65490 +
65491 return kc->clock_set(which_clock, &new_tp);
65492 }
65493
65494 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65495 index d523593..68197a4 100644
65496 --- a/kernel/power/poweroff.c
65497 +++ b/kernel/power/poweroff.c
65498 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65499 .enable_mask = SYSRQ_ENABLE_BOOT,
65500 };
65501
65502 -static int pm_sysrq_init(void)
65503 +static int __init pm_sysrq_init(void)
65504 {
65505 register_sysrq_key('o', &sysrq_poweroff_op);
65506 return 0;
65507 diff --git a/kernel/power/process.c b/kernel/power/process.c
65508 index 3d4b954..11af930 100644
65509 --- a/kernel/power/process.c
65510 +++ b/kernel/power/process.c
65511 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65512 u64 elapsed_csecs64;
65513 unsigned int elapsed_csecs;
65514 bool wakeup = false;
65515 + bool timedout = false;
65516
65517 do_gettimeofday(&start);
65518
65519 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65520
65521 while (true) {
65522 todo = 0;
65523 + if (time_after(jiffies, end_time))
65524 + timedout = true;
65525 read_lock(&tasklist_lock);
65526 do_each_thread(g, p) {
65527 if (frozen(p) || !freezable(p))
65528 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65529 * try_to_stop() after schedule() in ptrace/signal
65530 * stop sees TIF_FREEZE.
65531 */
65532 - if (!task_is_stopped_or_traced(p) &&
65533 - !freezer_should_skip(p))
65534 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65535 todo++;
65536 + if (timedout) {
65537 + printk(KERN_ERR "Task refusing to freeze:\n");
65538 + sched_show_task(p);
65539 + }
65540 + }
65541 } while_each_thread(g, p);
65542 read_unlock(&tasklist_lock);
65543
65544 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65545 todo += wq_busy;
65546 }
65547
65548 - if (!todo || time_after(jiffies, end_time))
65549 + if (!todo || timedout)
65550 break;
65551
65552 if (pm_wakeup_pending()) {
65553 diff --git a/kernel/printk.c b/kernel/printk.c
65554 index 7982a0a..2095fdc 100644
65555 --- a/kernel/printk.c
65556 +++ b/kernel/printk.c
65557 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65558 if (from_file && type != SYSLOG_ACTION_OPEN)
65559 return 0;
65560
65561 +#ifdef CONFIG_GRKERNSEC_DMESG
65562 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65563 + return -EPERM;
65564 +#endif
65565 +
65566 if (syslog_action_restricted(type)) {
65567 if (capable(CAP_SYSLOG))
65568 return 0;
65569 diff --git a/kernel/profile.c b/kernel/profile.c
65570 index 76b8e77..a2930e8 100644
65571 --- a/kernel/profile.c
65572 +++ b/kernel/profile.c
65573 @@ -39,7 +39,7 @@ struct profile_hit {
65574 /* Oprofile timer tick hook */
65575 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65576
65577 -static atomic_t *prof_buffer;
65578 +static atomic_unchecked_t *prof_buffer;
65579 static unsigned long prof_len, prof_shift;
65580
65581 int prof_on __read_mostly;
65582 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65583 hits[i].pc = 0;
65584 continue;
65585 }
65586 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65587 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65588 hits[i].hits = hits[i].pc = 0;
65589 }
65590 }
65591 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65592 * Add the current hit(s) and flush the write-queue out
65593 * to the global buffer:
65594 */
65595 - atomic_add(nr_hits, &prof_buffer[pc]);
65596 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65597 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65598 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65599 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65600 hits[i].pc = hits[i].hits = 0;
65601 }
65602 out:
65603 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65604 {
65605 unsigned long pc;
65606 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65607 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65608 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65609 }
65610 #endif /* !CONFIG_SMP */
65611
65612 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65613 return -EFAULT;
65614 buf++; p++; count--; read++;
65615 }
65616 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65617 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65618 if (copy_to_user(buf, (void *)pnt, count))
65619 return -EFAULT;
65620 read += count;
65621 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65622 }
65623 #endif
65624 profile_discard_flip_buffers();
65625 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65626 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65627 return count;
65628 }
65629
65630 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65631 index 78ab24a..332c915 100644
65632 --- a/kernel/ptrace.c
65633 +++ b/kernel/ptrace.c
65634 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65635 return ret;
65636 }
65637
65638 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65639 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65640 + unsigned int log)
65641 {
65642 const struct cred *cred = current_cred(), *tcred;
65643
65644 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65645 cred->gid == tcred->sgid &&
65646 cred->gid == tcred->gid))
65647 goto ok;
65648 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65649 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65650 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65651 goto ok;
65652 rcu_read_unlock();
65653 return -EPERM;
65654 @@ -207,7 +209,9 @@ ok:
65655 smp_rmb();
65656 if (task->mm)
65657 dumpable = get_dumpable(task->mm);
65658 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65659 + if (!dumpable &&
65660 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65661 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65662 return -EPERM;
65663
65664 return security_ptrace_access_check(task, mode);
65665 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65666 {
65667 int err;
65668 task_lock(task);
65669 - err = __ptrace_may_access(task, mode);
65670 + err = __ptrace_may_access(task, mode, 0);
65671 + task_unlock(task);
65672 + return !err;
65673 +}
65674 +
65675 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65676 +{
65677 + return __ptrace_may_access(task, mode, 0);
65678 +}
65679 +
65680 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65681 +{
65682 + int err;
65683 + task_lock(task);
65684 + err = __ptrace_may_access(task, mode, 1);
65685 task_unlock(task);
65686 return !err;
65687 }
65688 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65689 goto out;
65690
65691 task_lock(task);
65692 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65693 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65694 task_unlock(task);
65695 if (retval)
65696 goto unlock_creds;
65697 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65698 task->ptrace = PT_PTRACED;
65699 if (seize)
65700 task->ptrace |= PT_SEIZED;
65701 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65702 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65703 task->ptrace |= PT_PTRACE_CAP;
65704
65705 __ptrace_link(task, current);
65706 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65707 break;
65708 return -EIO;
65709 }
65710 - if (copy_to_user(dst, buf, retval))
65711 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65712 return -EFAULT;
65713 copied += retval;
65714 src += retval;
65715 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65716 bool seized = child->ptrace & PT_SEIZED;
65717 int ret = -EIO;
65718 siginfo_t siginfo, *si;
65719 - void __user *datavp = (void __user *) data;
65720 + void __user *datavp = (__force void __user *) data;
65721 unsigned long __user *datalp = datavp;
65722 unsigned long flags;
65723
65724 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65725 goto out;
65726 }
65727
65728 + if (gr_handle_ptrace(child, request)) {
65729 + ret = -EPERM;
65730 + goto out_put_task_struct;
65731 + }
65732 +
65733 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65734 ret = ptrace_attach(child, request, data);
65735 /*
65736 * Some architectures need to do book-keeping after
65737 * a ptrace attach.
65738 */
65739 - if (!ret)
65740 + if (!ret) {
65741 arch_ptrace_attach(child);
65742 + gr_audit_ptrace(child);
65743 + }
65744 goto out_put_task_struct;
65745 }
65746
65747 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65748 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65749 if (copied != sizeof(tmp))
65750 return -EIO;
65751 - return put_user(tmp, (unsigned long __user *)data);
65752 + return put_user(tmp, (__force unsigned long __user *)data);
65753 }
65754
65755 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65756 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65757 goto out;
65758 }
65759
65760 + if (gr_handle_ptrace(child, request)) {
65761 + ret = -EPERM;
65762 + goto out_put_task_struct;
65763 + }
65764 +
65765 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65766 ret = ptrace_attach(child, request, data);
65767 /*
65768 * Some architectures need to do book-keeping after
65769 * a ptrace attach.
65770 */
65771 - if (!ret)
65772 + if (!ret) {
65773 arch_ptrace_attach(child);
65774 + gr_audit_ptrace(child);
65775 + }
65776 goto out_put_task_struct;
65777 }
65778
65779 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65780 index 764825c..3aa6ac4 100644
65781 --- a/kernel/rcutorture.c
65782 +++ b/kernel/rcutorture.c
65783 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65784 { 0 };
65785 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65786 { 0 };
65787 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65788 -static atomic_t n_rcu_torture_alloc;
65789 -static atomic_t n_rcu_torture_alloc_fail;
65790 -static atomic_t n_rcu_torture_free;
65791 -static atomic_t n_rcu_torture_mberror;
65792 -static atomic_t n_rcu_torture_error;
65793 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65794 +static atomic_unchecked_t n_rcu_torture_alloc;
65795 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65796 +static atomic_unchecked_t n_rcu_torture_free;
65797 +static atomic_unchecked_t n_rcu_torture_mberror;
65798 +static atomic_unchecked_t n_rcu_torture_error;
65799 static long n_rcu_torture_boost_ktrerror;
65800 static long n_rcu_torture_boost_rterror;
65801 static long n_rcu_torture_boost_failure;
65802 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65803
65804 spin_lock_bh(&rcu_torture_lock);
65805 if (list_empty(&rcu_torture_freelist)) {
65806 - atomic_inc(&n_rcu_torture_alloc_fail);
65807 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65808 spin_unlock_bh(&rcu_torture_lock);
65809 return NULL;
65810 }
65811 - atomic_inc(&n_rcu_torture_alloc);
65812 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65813 p = rcu_torture_freelist.next;
65814 list_del_init(p);
65815 spin_unlock_bh(&rcu_torture_lock);
65816 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65817 static void
65818 rcu_torture_free(struct rcu_torture *p)
65819 {
65820 - atomic_inc(&n_rcu_torture_free);
65821 + atomic_inc_unchecked(&n_rcu_torture_free);
65822 spin_lock_bh(&rcu_torture_lock);
65823 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65824 spin_unlock_bh(&rcu_torture_lock);
65825 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65826 i = rp->rtort_pipe_count;
65827 if (i > RCU_TORTURE_PIPE_LEN)
65828 i = RCU_TORTURE_PIPE_LEN;
65829 - atomic_inc(&rcu_torture_wcount[i]);
65830 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65831 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65832 rp->rtort_mbtest = 0;
65833 rcu_torture_free(rp);
65834 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65835 i = rp->rtort_pipe_count;
65836 if (i > RCU_TORTURE_PIPE_LEN)
65837 i = RCU_TORTURE_PIPE_LEN;
65838 - atomic_inc(&rcu_torture_wcount[i]);
65839 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65840 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65841 rp->rtort_mbtest = 0;
65842 list_del(&rp->rtort_free);
65843 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65844 i = old_rp->rtort_pipe_count;
65845 if (i > RCU_TORTURE_PIPE_LEN)
65846 i = RCU_TORTURE_PIPE_LEN;
65847 - atomic_inc(&rcu_torture_wcount[i]);
65848 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65849 old_rp->rtort_pipe_count++;
65850 cur_ops->deferred_free(old_rp);
65851 }
65852 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65853 return;
65854 }
65855 if (p->rtort_mbtest == 0)
65856 - atomic_inc(&n_rcu_torture_mberror);
65857 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65858 spin_lock(&rand_lock);
65859 cur_ops->read_delay(&rand);
65860 n_rcu_torture_timers++;
65861 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65862 continue;
65863 }
65864 if (p->rtort_mbtest == 0)
65865 - atomic_inc(&n_rcu_torture_mberror);
65866 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65867 cur_ops->read_delay(&rand);
65868 preempt_disable();
65869 pipe_count = p->rtort_pipe_count;
65870 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65871 rcu_torture_current,
65872 rcu_torture_current_version,
65873 list_empty(&rcu_torture_freelist),
65874 - atomic_read(&n_rcu_torture_alloc),
65875 - atomic_read(&n_rcu_torture_alloc_fail),
65876 - atomic_read(&n_rcu_torture_free),
65877 - atomic_read(&n_rcu_torture_mberror),
65878 + atomic_read_unchecked(&n_rcu_torture_alloc),
65879 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65880 + atomic_read_unchecked(&n_rcu_torture_free),
65881 + atomic_read_unchecked(&n_rcu_torture_mberror),
65882 n_rcu_torture_boost_ktrerror,
65883 n_rcu_torture_boost_rterror,
65884 n_rcu_torture_boost_failure,
65885 n_rcu_torture_boosts,
65886 n_rcu_torture_timers);
65887 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65888 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65889 n_rcu_torture_boost_ktrerror != 0 ||
65890 n_rcu_torture_boost_rterror != 0 ||
65891 n_rcu_torture_boost_failure != 0)
65892 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65893 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65894 if (i > 1) {
65895 cnt += sprintf(&page[cnt], "!!! ");
65896 - atomic_inc(&n_rcu_torture_error);
65897 + atomic_inc_unchecked(&n_rcu_torture_error);
65898 WARN_ON_ONCE(1);
65899 }
65900 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65901 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65902 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65903 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65904 cnt += sprintf(&page[cnt], " %d",
65905 - atomic_read(&rcu_torture_wcount[i]));
65906 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65907 }
65908 cnt += sprintf(&page[cnt], "\n");
65909 if (cur_ops->stats)
65910 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65911
65912 if (cur_ops->cleanup)
65913 cur_ops->cleanup();
65914 - if (atomic_read(&n_rcu_torture_error))
65915 + if (atomic_read_unchecked(&n_rcu_torture_error))
65916 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65917 else
65918 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65919 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65920
65921 rcu_torture_current = NULL;
65922 rcu_torture_current_version = 0;
65923 - atomic_set(&n_rcu_torture_alloc, 0);
65924 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65925 - atomic_set(&n_rcu_torture_free, 0);
65926 - atomic_set(&n_rcu_torture_mberror, 0);
65927 - atomic_set(&n_rcu_torture_error, 0);
65928 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65929 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65930 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65931 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65932 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65933 n_rcu_torture_boost_ktrerror = 0;
65934 n_rcu_torture_boost_rterror = 0;
65935 n_rcu_torture_boost_failure = 0;
65936 n_rcu_torture_boosts = 0;
65937 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65938 - atomic_set(&rcu_torture_wcount[i], 0);
65939 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65940 for_each_possible_cpu(cpu) {
65941 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65942 per_cpu(rcu_torture_count, cpu)[i] = 0;
65943 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65944 index 6b76d81..7afc1b3 100644
65945 --- a/kernel/rcutree.c
65946 +++ b/kernel/rcutree.c
65947 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65948 trace_rcu_dyntick("Start");
65949 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65950 smp_mb__before_atomic_inc(); /* See above. */
65951 - atomic_inc(&rdtp->dynticks);
65952 + atomic_inc_unchecked(&rdtp->dynticks);
65953 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65954 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65955 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65956 local_irq_restore(flags);
65957 }
65958
65959 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65960 return;
65961 }
65962 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65963 - atomic_inc(&rdtp->dynticks);
65964 + atomic_inc_unchecked(&rdtp->dynticks);
65965 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65966 smp_mb__after_atomic_inc(); /* See above. */
65967 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65968 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65969 trace_rcu_dyntick("End");
65970 local_irq_restore(flags);
65971 }
65972 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65973 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65974
65975 if (rdtp->dynticks_nmi_nesting == 0 &&
65976 - (atomic_read(&rdtp->dynticks) & 0x1))
65977 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65978 return;
65979 rdtp->dynticks_nmi_nesting++;
65980 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65981 - atomic_inc(&rdtp->dynticks);
65982 + atomic_inc_unchecked(&rdtp->dynticks);
65983 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65984 smp_mb__after_atomic_inc(); /* See above. */
65985 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65986 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65987 }
65988
65989 /**
65990 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65991 return;
65992 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65993 smp_mb__before_atomic_inc(); /* See above. */
65994 - atomic_inc(&rdtp->dynticks);
65995 + atomic_inc_unchecked(&rdtp->dynticks);
65996 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65997 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65998 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65999 }
66000
66001 /**
66002 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
66003 */
66004 static int dyntick_save_progress_counter(struct rcu_data *rdp)
66005 {
66006 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
66007 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66008 return 0;
66009 }
66010
66011 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
66012 unsigned int curr;
66013 unsigned int snap;
66014
66015 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
66016 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66017 snap = (unsigned int)rdp->dynticks_snap;
66018
66019 /*
66020 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
66021 /*
66022 * Do RCU core processing for the current CPU.
66023 */
66024 -static void rcu_process_callbacks(struct softirq_action *unused)
66025 +static void rcu_process_callbacks(void)
66026 {
66027 trace_rcu_utilization("Start RCU core");
66028 __rcu_process_callbacks(&rcu_sched_state,
66029 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
66030 index 849ce9e..74bc9de 100644
66031 --- a/kernel/rcutree.h
66032 +++ b/kernel/rcutree.h
66033 @@ -86,7 +86,7 @@
66034 struct rcu_dynticks {
66035 int dynticks_nesting; /* Track irq/process nesting level. */
66036 int dynticks_nmi_nesting; /* Track NMI nesting level. */
66037 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
66038 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
66039 };
66040
66041 /* RCU's kthread states for tracing. */
66042 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
66043 index 4b9b9f8..2326053 100644
66044 --- a/kernel/rcutree_plugin.h
66045 +++ b/kernel/rcutree_plugin.h
66046 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
66047
66048 /* Clean up and exit. */
66049 smp_mb(); /* ensure expedited GP seen before counter increment. */
66050 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
66051 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
66052 unlock_mb_ret:
66053 mutex_unlock(&sync_rcu_preempt_exp_mutex);
66054 mb_ret:
66055 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
66056
66057 #else /* #ifndef CONFIG_SMP */
66058
66059 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
66060 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
66061 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
66062 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
66063
66064 static int synchronize_sched_expedited_cpu_stop(void *data)
66065 {
66066 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
66067 int firstsnap, s, snap, trycount = 0;
66068
66069 /* Note that atomic_inc_return() implies full memory barrier. */
66070 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
66071 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
66072 get_online_cpus();
66073
66074 /*
66075 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
66076 }
66077
66078 /* Check to see if someone else did our work for us. */
66079 - s = atomic_read(&sync_sched_expedited_done);
66080 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66081 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
66082 smp_mb(); /* ensure test happens before caller kfree */
66083 return;
66084 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
66085 * grace period works for us.
66086 */
66087 get_online_cpus();
66088 - snap = atomic_read(&sync_sched_expedited_started) - 1;
66089 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
66090 smp_mb(); /* ensure read is before try_stop_cpus(). */
66091 }
66092
66093 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
66094 * than we did beat us to the punch.
66095 */
66096 do {
66097 - s = atomic_read(&sync_sched_expedited_done);
66098 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66099 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
66100 smp_mb(); /* ensure test happens before caller kfree */
66101 break;
66102 }
66103 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
66104 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
66105
66106 put_online_cpus();
66107 }
66108 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
66109 for_each_online_cpu(thatcpu) {
66110 if (thatcpu == cpu)
66111 continue;
66112 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
66113 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
66114 thatcpu).dynticks);
66115 smp_mb(); /* Order sampling of snap with end of grace period. */
66116 if ((snap & 0x1) != 0) {
66117 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
66118 index 9feffa4..54058df 100644
66119 --- a/kernel/rcutree_trace.c
66120 +++ b/kernel/rcutree_trace.c
66121 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
66122 rdp->qs_pending);
66123 #ifdef CONFIG_NO_HZ
66124 seq_printf(m, " dt=%d/%d/%d df=%lu",
66125 - atomic_read(&rdp->dynticks->dynticks),
66126 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66127 rdp->dynticks->dynticks_nesting,
66128 rdp->dynticks->dynticks_nmi_nesting,
66129 rdp->dynticks_fqs);
66130 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
66131 rdp->qs_pending);
66132 #ifdef CONFIG_NO_HZ
66133 seq_printf(m, ",%d,%d,%d,%lu",
66134 - atomic_read(&rdp->dynticks->dynticks),
66135 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66136 rdp->dynticks->dynticks_nesting,
66137 rdp->dynticks->dynticks_nmi_nesting,
66138 rdp->dynticks_fqs);
66139 diff --git a/kernel/resource.c b/kernel/resource.c
66140 index 7640b3a..5879283 100644
66141 --- a/kernel/resource.c
66142 +++ b/kernel/resource.c
66143 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
66144
66145 static int __init ioresources_init(void)
66146 {
66147 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66148 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66149 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
66150 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
66151 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66152 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
66153 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
66154 +#endif
66155 +#else
66156 proc_create("ioports", 0, NULL, &proc_ioports_operations);
66157 proc_create("iomem", 0, NULL, &proc_iomem_operations);
66158 +#endif
66159 return 0;
66160 }
66161 __initcall(ioresources_init);
66162 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
66163 index 3d9f31c..7fefc9e 100644
66164 --- a/kernel/rtmutex-tester.c
66165 +++ b/kernel/rtmutex-tester.c
66166 @@ -20,7 +20,7 @@
66167 #define MAX_RT_TEST_MUTEXES 8
66168
66169 static spinlock_t rttest_lock;
66170 -static atomic_t rttest_event;
66171 +static atomic_unchecked_t rttest_event;
66172
66173 struct test_thread_data {
66174 int opcode;
66175 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66176
66177 case RTTEST_LOCKCONT:
66178 td->mutexes[td->opdata] = 1;
66179 - td->event = atomic_add_return(1, &rttest_event);
66180 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66181 return 0;
66182
66183 case RTTEST_RESET:
66184 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66185 return 0;
66186
66187 case RTTEST_RESETEVENT:
66188 - atomic_set(&rttest_event, 0);
66189 + atomic_set_unchecked(&rttest_event, 0);
66190 return 0;
66191
66192 default:
66193 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66194 return ret;
66195
66196 td->mutexes[id] = 1;
66197 - td->event = atomic_add_return(1, &rttest_event);
66198 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66199 rt_mutex_lock(&mutexes[id]);
66200 - td->event = atomic_add_return(1, &rttest_event);
66201 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66202 td->mutexes[id] = 4;
66203 return 0;
66204
66205 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66206 return ret;
66207
66208 td->mutexes[id] = 1;
66209 - td->event = atomic_add_return(1, &rttest_event);
66210 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66211 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
66212 - td->event = atomic_add_return(1, &rttest_event);
66213 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66214 td->mutexes[id] = ret ? 0 : 4;
66215 return ret ? -EINTR : 0;
66216
66217 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66218 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
66219 return ret;
66220
66221 - td->event = atomic_add_return(1, &rttest_event);
66222 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66223 rt_mutex_unlock(&mutexes[id]);
66224 - td->event = atomic_add_return(1, &rttest_event);
66225 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66226 td->mutexes[id] = 0;
66227 return 0;
66228
66229 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66230 break;
66231
66232 td->mutexes[dat] = 2;
66233 - td->event = atomic_add_return(1, &rttest_event);
66234 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66235 break;
66236
66237 default:
66238 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66239 return;
66240
66241 td->mutexes[dat] = 3;
66242 - td->event = atomic_add_return(1, &rttest_event);
66243 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66244 break;
66245
66246 case RTTEST_LOCKNOWAIT:
66247 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66248 return;
66249
66250 td->mutexes[dat] = 1;
66251 - td->event = atomic_add_return(1, &rttest_event);
66252 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66253 return;
66254
66255 default:
66256 diff --git a/kernel/sched.c b/kernel/sched.c
66257 index d6b149c..896cbb8 100644
66258 --- a/kernel/sched.c
66259 +++ b/kernel/sched.c
66260 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
66261 BUG(); /* the idle class will always have a runnable task */
66262 }
66263
66264 +#ifdef CONFIG_GRKERNSEC_SETXID
66265 +extern void gr_delayed_cred_worker(void);
66266 +static inline void gr_cred_schedule(void)
66267 +{
66268 + if (unlikely(current->delayed_cred))
66269 + gr_delayed_cred_worker();
66270 +}
66271 +#else
66272 +static inline void gr_cred_schedule(void)
66273 +{
66274 +}
66275 +#endif
66276 +
66277 /*
66278 * __schedule() is the main scheduler function.
66279 */
66280 @@ -4408,6 +4421,8 @@ need_resched:
66281
66282 schedule_debug(prev);
66283
66284 + gr_cred_schedule();
66285 +
66286 if (sched_feat(HRTICK))
66287 hrtick_clear(rq);
66288
66289 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66290 /* convert nice value [19,-20] to rlimit style value [1,40] */
66291 int nice_rlim = 20 - nice;
66292
66293 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66294 +
66295 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66296 capable(CAP_SYS_NICE));
66297 }
66298 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66299 if (nice > 19)
66300 nice = 19;
66301
66302 - if (increment < 0 && !can_nice(current, nice))
66303 + if (increment < 0 && (!can_nice(current, nice) ||
66304 + gr_handle_chroot_nice()))
66305 return -EPERM;
66306
66307 retval = security_task_setnice(current, nice);
66308 @@ -5288,6 +5306,7 @@ recheck:
66309 unsigned long rlim_rtprio =
66310 task_rlimit(p, RLIMIT_RTPRIO);
66311
66312 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66313 /* can't set/change the rt policy */
66314 if (policy != p->policy && !rlim_rtprio)
66315 return -EPERM;
66316 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66317 index 429242f..d7cca82 100644
66318 --- a/kernel/sched_autogroup.c
66319 +++ b/kernel/sched_autogroup.c
66320 @@ -7,7 +7,7 @@
66321
66322 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66323 static struct autogroup autogroup_default;
66324 -static atomic_t autogroup_seq_nr;
66325 +static atomic_unchecked_t autogroup_seq_nr;
66326
66327 static void __init autogroup_init(struct task_struct *init_task)
66328 {
66329 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66330
66331 kref_init(&ag->kref);
66332 init_rwsem(&ag->lock);
66333 - ag->id = atomic_inc_return(&autogroup_seq_nr);
66334 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66335 ag->tg = tg;
66336 #ifdef CONFIG_RT_GROUP_SCHED
66337 /*
66338 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66339 index 8a39fa3..34f3dbc 100644
66340 --- a/kernel/sched_fair.c
66341 +++ b/kernel/sched_fair.c
66342 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66343 * run_rebalance_domains is triggered when needed from the scheduler tick.
66344 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66345 */
66346 -static void run_rebalance_domains(struct softirq_action *h)
66347 +static void run_rebalance_domains(void)
66348 {
66349 int this_cpu = smp_processor_id();
66350 struct rq *this_rq = cpu_rq(this_cpu);
66351 diff --git a/kernel/signal.c b/kernel/signal.c
66352 index 2065515..aed2987 100644
66353 --- a/kernel/signal.c
66354 +++ b/kernel/signal.c
66355 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66356
66357 int print_fatal_signals __read_mostly;
66358
66359 -static void __user *sig_handler(struct task_struct *t, int sig)
66360 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
66361 {
66362 return t->sighand->action[sig - 1].sa.sa_handler;
66363 }
66364
66365 -static int sig_handler_ignored(void __user *handler, int sig)
66366 +static int sig_handler_ignored(__sighandler_t handler, int sig)
66367 {
66368 /* Is it explicitly or implicitly ignored? */
66369 return handler == SIG_IGN ||
66370 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66371 static int sig_task_ignored(struct task_struct *t, int sig,
66372 int from_ancestor_ns)
66373 {
66374 - void __user *handler;
66375 + __sighandler_t handler;
66376
66377 handler = sig_handler(t, sig);
66378
66379 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66380 atomic_inc(&user->sigpending);
66381 rcu_read_unlock();
66382
66383 + if (!override_rlimit)
66384 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66385 +
66386 if (override_rlimit ||
66387 atomic_read(&user->sigpending) <=
66388 task_rlimit(t, RLIMIT_SIGPENDING)) {
66389 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66390
66391 int unhandled_signal(struct task_struct *tsk, int sig)
66392 {
66393 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66394 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66395 if (is_global_init(tsk))
66396 return 1;
66397 if (handler != SIG_IGN && handler != SIG_DFL)
66398 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66399 }
66400 }
66401
66402 + /* allow glibc communication via tgkill to other threads in our
66403 + thread group */
66404 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66405 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66406 + && gr_handle_signal(t, sig))
66407 + return -EPERM;
66408 +
66409 return security_task_kill(t, info, sig, 0);
66410 }
66411
66412 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66413 return send_signal(sig, info, p, 1);
66414 }
66415
66416 -static int
66417 +int
66418 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66419 {
66420 return send_signal(sig, info, t, 0);
66421 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66422 unsigned long int flags;
66423 int ret, blocked, ignored;
66424 struct k_sigaction *action;
66425 + int is_unhandled = 0;
66426
66427 spin_lock_irqsave(&t->sighand->siglock, flags);
66428 action = &t->sighand->action[sig-1];
66429 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66430 }
66431 if (action->sa.sa_handler == SIG_DFL)
66432 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66433 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66434 + is_unhandled = 1;
66435 ret = specific_send_sig_info(sig, info, t);
66436 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66437
66438 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66439 + normal operation */
66440 + if (is_unhandled) {
66441 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66442 + gr_handle_crash(t, sig);
66443 + }
66444 +
66445 return ret;
66446 }
66447
66448 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66449 ret = check_kill_permission(sig, info, p);
66450 rcu_read_unlock();
66451
66452 - if (!ret && sig)
66453 + if (!ret && sig) {
66454 ret = do_send_sig_info(sig, info, p, true);
66455 + if (!ret)
66456 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66457 + }
66458
66459 return ret;
66460 }
66461 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66462 int error = -ESRCH;
66463
66464 rcu_read_lock();
66465 - p = find_task_by_vpid(pid);
66466 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66467 + /* allow glibc communication via tgkill to other threads in our
66468 + thread group */
66469 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66470 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66471 + p = find_task_by_vpid_unrestricted(pid);
66472 + else
66473 +#endif
66474 + p = find_task_by_vpid(pid);
66475 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66476 error = check_kill_permission(sig, info, p);
66477 /*
66478 diff --git a/kernel/smp.c b/kernel/smp.c
66479 index db197d6..17aef0b 100644
66480 --- a/kernel/smp.c
66481 +++ b/kernel/smp.c
66482 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66483 }
66484 EXPORT_SYMBOL(smp_call_function);
66485
66486 -void ipi_call_lock(void)
66487 +void ipi_call_lock(void) __acquires(call_function.lock)
66488 {
66489 raw_spin_lock(&call_function.lock);
66490 }
66491
66492 -void ipi_call_unlock(void)
66493 +void ipi_call_unlock(void) __releases(call_function.lock)
66494 {
66495 raw_spin_unlock(&call_function.lock);
66496 }
66497
66498 -void ipi_call_lock_irq(void)
66499 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66500 {
66501 raw_spin_lock_irq(&call_function.lock);
66502 }
66503
66504 -void ipi_call_unlock_irq(void)
66505 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66506 {
66507 raw_spin_unlock_irq(&call_function.lock);
66508 }
66509 diff --git a/kernel/softirq.c b/kernel/softirq.c
66510 index 2c71d91..1021f81 100644
66511 --- a/kernel/softirq.c
66512 +++ b/kernel/softirq.c
66513 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66514
66515 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66516
66517 -char *softirq_to_name[NR_SOFTIRQS] = {
66518 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66519 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66520 "TASKLET", "SCHED", "HRTIMER", "RCU"
66521 };
66522 @@ -235,7 +235,7 @@ restart:
66523 kstat_incr_softirqs_this_cpu(vec_nr);
66524
66525 trace_softirq_entry(vec_nr);
66526 - h->action(h);
66527 + h->action();
66528 trace_softirq_exit(vec_nr);
66529 if (unlikely(prev_count != preempt_count())) {
66530 printk(KERN_ERR "huh, entered softirq %u %s %p"
66531 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66532 local_irq_restore(flags);
66533 }
66534
66535 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66536 +void open_softirq(int nr, void (*action)(void))
66537 {
66538 - softirq_vec[nr].action = action;
66539 + pax_open_kernel();
66540 + *(void **)&softirq_vec[nr].action = action;
66541 + pax_close_kernel();
66542 }
66543
66544 /*
66545 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66546
66547 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66548
66549 -static void tasklet_action(struct softirq_action *a)
66550 +static void tasklet_action(void)
66551 {
66552 struct tasklet_struct *list;
66553
66554 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66555 }
66556 }
66557
66558 -static void tasklet_hi_action(struct softirq_action *a)
66559 +static void tasklet_hi_action(void)
66560 {
66561 struct tasklet_struct *list;
66562
66563 diff --git a/kernel/sys.c b/kernel/sys.c
66564 index 481611f..0754d86 100644
66565 --- a/kernel/sys.c
66566 +++ b/kernel/sys.c
66567 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66568 error = -EACCES;
66569 goto out;
66570 }
66571 +
66572 + if (gr_handle_chroot_setpriority(p, niceval)) {
66573 + error = -EACCES;
66574 + goto out;
66575 + }
66576 +
66577 no_nice = security_task_setnice(p, niceval);
66578 if (no_nice) {
66579 error = no_nice;
66580 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66581 goto error;
66582 }
66583
66584 + if (gr_check_group_change(new->gid, new->egid, -1))
66585 + goto error;
66586 +
66587 if (rgid != (gid_t) -1 ||
66588 (egid != (gid_t) -1 && egid != old->gid))
66589 new->sgid = new->egid;
66590 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66591 old = current_cred();
66592
66593 retval = -EPERM;
66594 +
66595 + if (gr_check_group_change(gid, gid, gid))
66596 + goto error;
66597 +
66598 if (nsown_capable(CAP_SETGID))
66599 new->gid = new->egid = new->sgid = new->fsgid = gid;
66600 else if (gid == old->gid || gid == old->sgid)
66601 @@ -618,7 +631,7 @@ error:
66602 /*
66603 * change the user struct in a credentials set to match the new UID
66604 */
66605 -static int set_user(struct cred *new)
66606 +int set_user(struct cred *new)
66607 {
66608 struct user_struct *new_user;
66609
66610 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66611 goto error;
66612 }
66613
66614 + if (gr_check_user_change(new->uid, new->euid, -1))
66615 + goto error;
66616 +
66617 if (new->uid != old->uid) {
66618 retval = set_user(new);
66619 if (retval < 0)
66620 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66621 old = current_cred();
66622
66623 retval = -EPERM;
66624 +
66625 + if (gr_check_crash_uid(uid))
66626 + goto error;
66627 + if (gr_check_user_change(uid, uid, uid))
66628 + goto error;
66629 +
66630 if (nsown_capable(CAP_SETUID)) {
66631 new->suid = new->uid = uid;
66632 if (uid != old->uid) {
66633 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66634 goto error;
66635 }
66636
66637 + if (gr_check_user_change(ruid, euid, -1))
66638 + goto error;
66639 +
66640 if (ruid != (uid_t) -1) {
66641 new->uid = ruid;
66642 if (ruid != old->uid) {
66643 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66644 goto error;
66645 }
66646
66647 + if (gr_check_group_change(rgid, egid, -1))
66648 + goto error;
66649 +
66650 if (rgid != (gid_t) -1)
66651 new->gid = rgid;
66652 if (egid != (gid_t) -1)
66653 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66654 old = current_cred();
66655 old_fsuid = old->fsuid;
66656
66657 + if (gr_check_user_change(-1, -1, uid))
66658 + goto error;
66659 +
66660 if (uid == old->uid || uid == old->euid ||
66661 uid == old->suid || uid == old->fsuid ||
66662 nsown_capable(CAP_SETUID)) {
66663 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66664 }
66665 }
66666
66667 +error:
66668 abort_creds(new);
66669 return old_fsuid;
66670
66671 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66672 if (gid == old->gid || gid == old->egid ||
66673 gid == old->sgid || gid == old->fsgid ||
66674 nsown_capable(CAP_SETGID)) {
66675 + if (gr_check_group_change(-1, -1, gid))
66676 + goto error;
66677 +
66678 if (gid != old_fsgid) {
66679 new->fsgid = gid;
66680 goto change_okay;
66681 }
66682 }
66683
66684 +error:
66685 abort_creds(new);
66686 return old_fsgid;
66687
66688 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66689 }
66690 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66691 snprintf(buf, len, "2.6.%u%s", v, rest);
66692 - ret = copy_to_user(release, buf, len);
66693 + if (len > sizeof(buf))
66694 + ret = -EFAULT;
66695 + else
66696 + ret = copy_to_user(release, buf, len);
66697 }
66698 return ret;
66699 }
66700 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66701 return -EFAULT;
66702
66703 down_read(&uts_sem);
66704 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66705 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66706 __OLD_UTS_LEN);
66707 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66708 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66709 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66710 __OLD_UTS_LEN);
66711 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66712 - error |= __copy_to_user(&name->release, &utsname()->release,
66713 + error |= __copy_to_user(name->release, &utsname()->release,
66714 __OLD_UTS_LEN);
66715 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66716 - error |= __copy_to_user(&name->version, &utsname()->version,
66717 + error |= __copy_to_user(name->version, &utsname()->version,
66718 __OLD_UTS_LEN);
66719 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66720 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66721 + error |= __copy_to_user(name->machine, &utsname()->machine,
66722 __OLD_UTS_LEN);
66723 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66724 up_read(&uts_sem);
66725 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66726 error = get_dumpable(me->mm);
66727 break;
66728 case PR_SET_DUMPABLE:
66729 - if (arg2 < 0 || arg2 > 1) {
66730 + if (arg2 > 1) {
66731 error = -EINVAL;
66732 break;
66733 }
66734 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66735 index ae27196..7506d69 100644
66736 --- a/kernel/sysctl.c
66737 +++ b/kernel/sysctl.c
66738 @@ -86,6 +86,13 @@
66739
66740
66741 #if defined(CONFIG_SYSCTL)
66742 +#include <linux/grsecurity.h>
66743 +#include <linux/grinternal.h>
66744 +
66745 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66746 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66747 + const int op);
66748 +extern int gr_handle_chroot_sysctl(const int op);
66749
66750 /* External variables not in a header file. */
66751 extern int sysctl_overcommit_memory;
66752 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66753 }
66754
66755 #endif
66756 +extern struct ctl_table grsecurity_table[];
66757
66758 static struct ctl_table root_table[];
66759 static struct ctl_table_root sysctl_table_root;
66760 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66761 int sysctl_legacy_va_layout;
66762 #endif
66763
66764 +#ifdef CONFIG_PAX_SOFTMODE
66765 +static ctl_table pax_table[] = {
66766 + {
66767 + .procname = "softmode",
66768 + .data = &pax_softmode,
66769 + .maxlen = sizeof(unsigned int),
66770 + .mode = 0600,
66771 + .proc_handler = &proc_dointvec,
66772 + },
66773 +
66774 + { }
66775 +};
66776 +#endif
66777 +
66778 /* The default sysctl tables: */
66779
66780 static struct ctl_table root_table[] = {
66781 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66782 #endif
66783
66784 static struct ctl_table kern_table[] = {
66785 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66786 + {
66787 + .procname = "grsecurity",
66788 + .mode = 0500,
66789 + .child = grsecurity_table,
66790 + },
66791 +#endif
66792 +
66793 +#ifdef CONFIG_PAX_SOFTMODE
66794 + {
66795 + .procname = "pax",
66796 + .mode = 0500,
66797 + .child = pax_table,
66798 + },
66799 +#endif
66800 +
66801 {
66802 .procname = "sched_child_runs_first",
66803 .data = &sysctl_sched_child_runs_first,
66804 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66805 .data = &modprobe_path,
66806 .maxlen = KMOD_PATH_LEN,
66807 .mode = 0644,
66808 - .proc_handler = proc_dostring,
66809 + .proc_handler = proc_dostring_modpriv,
66810 },
66811 {
66812 .procname = "modules_disabled",
66813 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66814 .extra1 = &zero,
66815 .extra2 = &one,
66816 },
66817 +#endif
66818 {
66819 .procname = "kptr_restrict",
66820 .data = &kptr_restrict,
66821 .maxlen = sizeof(int),
66822 .mode = 0644,
66823 .proc_handler = proc_dmesg_restrict,
66824 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66825 + .extra1 = &two,
66826 +#else
66827 .extra1 = &zero,
66828 +#endif
66829 .extra2 = &two,
66830 },
66831 -#endif
66832 {
66833 .procname = "ngroups_max",
66834 .data = &ngroups_max,
66835 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66836 .proc_handler = proc_dointvec_minmax,
66837 .extra1 = &zero,
66838 },
66839 + {
66840 + .procname = "heap_stack_gap",
66841 + .data = &sysctl_heap_stack_gap,
66842 + .maxlen = sizeof(sysctl_heap_stack_gap),
66843 + .mode = 0644,
66844 + .proc_handler = proc_doulongvec_minmax,
66845 + },
66846 #else
66847 {
66848 .procname = "nr_trim_pages",
66849 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66850 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66851 {
66852 int mode;
66853 + int error;
66854 +
66855 + if (table->parent != NULL && table->parent->procname != NULL &&
66856 + table->procname != NULL &&
66857 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66858 + return -EACCES;
66859 + if (gr_handle_chroot_sysctl(op))
66860 + return -EACCES;
66861 + error = gr_handle_sysctl(table, op);
66862 + if (error)
66863 + return error;
66864
66865 if (root->permissions)
66866 mode = root->permissions(root, current->nsproxy, table);
66867 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66868 buffer, lenp, ppos);
66869 }
66870
66871 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66872 + void __user *buffer, size_t *lenp, loff_t *ppos)
66873 +{
66874 + if (write && !capable(CAP_SYS_MODULE))
66875 + return -EPERM;
66876 +
66877 + return _proc_do_string(table->data, table->maxlen, write,
66878 + buffer, lenp, ppos);
66879 +}
66880 +
66881 static size_t proc_skip_spaces(char **buf)
66882 {
66883 size_t ret;
66884 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66885 len = strlen(tmp);
66886 if (len > *size)
66887 len = *size;
66888 + if (len > sizeof(tmp))
66889 + len = sizeof(tmp);
66890 if (copy_to_user(*buf, tmp, len))
66891 return -EFAULT;
66892 *size -= len;
66893 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66894 *i = val;
66895 } else {
66896 val = convdiv * (*i) / convmul;
66897 - if (!first)
66898 + if (!first) {
66899 err = proc_put_char(&buffer, &left, '\t');
66900 + if (err)
66901 + break;
66902 + }
66903 err = proc_put_long(&buffer, &left, val, false);
66904 if (err)
66905 break;
66906 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66907 return -ENOSYS;
66908 }
66909
66910 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66911 + void __user *buffer, size_t *lenp, loff_t *ppos)
66912 +{
66913 + return -ENOSYS;
66914 +}
66915 +
66916 int proc_dointvec(struct ctl_table *table, int write,
66917 void __user *buffer, size_t *lenp, loff_t *ppos)
66918 {
66919 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66920 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66921 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66922 EXPORT_SYMBOL(proc_dostring);
66923 +EXPORT_SYMBOL(proc_dostring_modpriv);
66924 EXPORT_SYMBOL(proc_doulongvec_minmax);
66925 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66926 EXPORT_SYMBOL(register_sysctl_table);
66927 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66928 index a650694..aaeeb20 100644
66929 --- a/kernel/sysctl_binary.c
66930 +++ b/kernel/sysctl_binary.c
66931 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66932 int i;
66933
66934 set_fs(KERNEL_DS);
66935 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66936 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66937 set_fs(old_fs);
66938 if (result < 0)
66939 goto out_kfree;
66940 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66941 }
66942
66943 set_fs(KERNEL_DS);
66944 - result = vfs_write(file, buffer, str - buffer, &pos);
66945 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66946 set_fs(old_fs);
66947 if (result < 0)
66948 goto out_kfree;
66949 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66950 int i;
66951
66952 set_fs(KERNEL_DS);
66953 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66954 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66955 set_fs(old_fs);
66956 if (result < 0)
66957 goto out_kfree;
66958 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66959 }
66960
66961 set_fs(KERNEL_DS);
66962 - result = vfs_write(file, buffer, str - buffer, &pos);
66963 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66964 set_fs(old_fs);
66965 if (result < 0)
66966 goto out_kfree;
66967 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66968 int i;
66969
66970 set_fs(KERNEL_DS);
66971 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66972 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66973 set_fs(old_fs);
66974 if (result < 0)
66975 goto out;
66976 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66977 __le16 dnaddr;
66978
66979 set_fs(KERNEL_DS);
66980 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66981 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66982 set_fs(old_fs);
66983 if (result < 0)
66984 goto out;
66985 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66986 le16_to_cpu(dnaddr) & 0x3ff);
66987
66988 set_fs(KERNEL_DS);
66989 - result = vfs_write(file, buf, len, &pos);
66990 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66991 set_fs(old_fs);
66992 if (result < 0)
66993 goto out;
66994 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66995 index 362da65..ab8ef8c 100644
66996 --- a/kernel/sysctl_check.c
66997 +++ b/kernel/sysctl_check.c
66998 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66999 set_fail(&fail, table, "Directory with extra2");
67000 } else {
67001 if ((table->proc_handler == proc_dostring) ||
67002 + (table->proc_handler == proc_dostring_modpriv) ||
67003 (table->proc_handler == proc_dointvec) ||
67004 (table->proc_handler == proc_dointvec_minmax) ||
67005 (table->proc_handler == proc_dointvec_jiffies) ||
67006 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
67007 index e660464..c8b9e67 100644
67008 --- a/kernel/taskstats.c
67009 +++ b/kernel/taskstats.c
67010 @@ -27,9 +27,12 @@
67011 #include <linux/cgroup.h>
67012 #include <linux/fs.h>
67013 #include <linux/file.h>
67014 +#include <linux/grsecurity.h>
67015 #include <net/genetlink.h>
67016 #include <linux/atomic.h>
67017
67018 +extern int gr_is_taskstats_denied(int pid);
67019 +
67020 /*
67021 * Maximum length of a cpumask that can be specified in
67022 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
67023 @@ -556,6 +559,9 @@ err:
67024
67025 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
67026 {
67027 + if (gr_is_taskstats_denied(current->pid))
67028 + return -EACCES;
67029 +
67030 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
67031 return cmd_attr_register_cpumask(info);
67032 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
67033 diff --git a/kernel/time.c b/kernel/time.c
67034 index 73e416d..cfc6f69 100644
67035 --- a/kernel/time.c
67036 +++ b/kernel/time.c
67037 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
67038 return error;
67039
67040 if (tz) {
67041 + /* we log in do_settimeofday called below, so don't log twice
67042 + */
67043 + if (!tv)
67044 + gr_log_timechange();
67045 +
67046 /* SMP safe, global irq locking makes it work. */
67047 sys_tz = *tz;
67048 update_vsyscall_tz();
67049 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
67050 index 8a46f5d..bbe6f9c 100644
67051 --- a/kernel/time/alarmtimer.c
67052 +++ b/kernel/time/alarmtimer.c
67053 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
67054 struct platform_device *pdev;
67055 int error = 0;
67056 int i;
67057 - struct k_clock alarm_clock = {
67058 + static struct k_clock alarm_clock = {
67059 .clock_getres = alarm_clock_getres,
67060 .clock_get = alarm_clock_get,
67061 .timer_create = alarm_timer_create,
67062 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
67063 index fd4a7b1..fae5c2a 100644
67064 --- a/kernel/time/tick-broadcast.c
67065 +++ b/kernel/time/tick-broadcast.c
67066 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
67067 * then clear the broadcast bit.
67068 */
67069 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
67070 - int cpu = smp_processor_id();
67071 + cpu = smp_processor_id();
67072
67073 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
67074 tick_broadcast_clear_oneshot(cpu);
67075 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
67076 index 2378413..be455fd 100644
67077 --- a/kernel/time/timekeeping.c
67078 +++ b/kernel/time/timekeeping.c
67079 @@ -14,6 +14,7 @@
67080 #include <linux/init.h>
67081 #include <linux/mm.h>
67082 #include <linux/sched.h>
67083 +#include <linux/grsecurity.h>
67084 #include <linux/syscore_ops.h>
67085 #include <linux/clocksource.h>
67086 #include <linux/jiffies.h>
67087 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
67088 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
67089 return -EINVAL;
67090
67091 + gr_log_timechange();
67092 +
67093 write_seqlock_irqsave(&xtime_lock, flags);
67094
67095 timekeeping_forward_now();
67096 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
67097 index 3258455..f35227d 100644
67098 --- a/kernel/time/timer_list.c
67099 +++ b/kernel/time/timer_list.c
67100 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
67101
67102 static void print_name_offset(struct seq_file *m, void *sym)
67103 {
67104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67105 + SEQ_printf(m, "<%p>", NULL);
67106 +#else
67107 char symname[KSYM_NAME_LEN];
67108
67109 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
67110 SEQ_printf(m, "<%pK>", sym);
67111 else
67112 SEQ_printf(m, "%s", symname);
67113 +#endif
67114 }
67115
67116 static void
67117 @@ -112,7 +116,11 @@ next_one:
67118 static void
67119 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
67120 {
67121 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67122 + SEQ_printf(m, " .base: %p\n", NULL);
67123 +#else
67124 SEQ_printf(m, " .base: %pK\n", base);
67125 +#endif
67126 SEQ_printf(m, " .index: %d\n",
67127 base->index);
67128 SEQ_printf(m, " .resolution: %Lu nsecs\n",
67129 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
67130 {
67131 struct proc_dir_entry *pe;
67132
67133 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67134 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
67135 +#else
67136 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
67137 +#endif
67138 if (!pe)
67139 return -ENOMEM;
67140 return 0;
67141 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
67142 index 0b537f2..9e71eca 100644
67143 --- a/kernel/time/timer_stats.c
67144 +++ b/kernel/time/timer_stats.c
67145 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
67146 static unsigned long nr_entries;
67147 static struct entry entries[MAX_ENTRIES];
67148
67149 -static atomic_t overflow_count;
67150 +static atomic_unchecked_t overflow_count;
67151
67152 /*
67153 * The entries are in a hash-table, for fast lookup:
67154 @@ -140,7 +140,7 @@ static void reset_entries(void)
67155 nr_entries = 0;
67156 memset(entries, 0, sizeof(entries));
67157 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
67158 - atomic_set(&overflow_count, 0);
67159 + atomic_set_unchecked(&overflow_count, 0);
67160 }
67161
67162 static struct entry *alloc_entry(void)
67163 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67164 if (likely(entry))
67165 entry->count++;
67166 else
67167 - atomic_inc(&overflow_count);
67168 + atomic_inc_unchecked(&overflow_count);
67169
67170 out_unlock:
67171 raw_spin_unlock_irqrestore(lock, flags);
67172 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67173
67174 static void print_name_offset(struct seq_file *m, unsigned long addr)
67175 {
67176 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67177 + seq_printf(m, "<%p>", NULL);
67178 +#else
67179 char symname[KSYM_NAME_LEN];
67180
67181 if (lookup_symbol_name(addr, symname) < 0)
67182 seq_printf(m, "<%p>", (void *)addr);
67183 else
67184 seq_printf(m, "%s", symname);
67185 +#endif
67186 }
67187
67188 static int tstats_show(struct seq_file *m, void *v)
67189 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
67190
67191 seq_puts(m, "Timer Stats Version: v0.2\n");
67192 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
67193 - if (atomic_read(&overflow_count))
67194 + if (atomic_read_unchecked(&overflow_count))
67195 seq_printf(m, "Overflow: %d entries\n",
67196 - atomic_read(&overflow_count));
67197 + atomic_read_unchecked(&overflow_count));
67198
67199 for (i = 0; i < nr_entries; i++) {
67200 entry = entries + i;
67201 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
67202 {
67203 struct proc_dir_entry *pe;
67204
67205 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67206 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
67207 +#else
67208 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
67209 +#endif
67210 if (!pe)
67211 return -ENOMEM;
67212 return 0;
67213 diff --git a/kernel/timer.c b/kernel/timer.c
67214 index 9c3c62b..441690e 100644
67215 --- a/kernel/timer.c
67216 +++ b/kernel/timer.c
67217 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
67218 /*
67219 * This function runs timers and the timer-tq in bottom half context.
67220 */
67221 -static void run_timer_softirq(struct softirq_action *h)
67222 +static void run_timer_softirq(void)
67223 {
67224 struct tvec_base *base = __this_cpu_read(tvec_bases);
67225
67226 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
67227 index 16fc34a..efd8bb8 100644
67228 --- a/kernel/trace/blktrace.c
67229 +++ b/kernel/trace/blktrace.c
67230 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
67231 struct blk_trace *bt = filp->private_data;
67232 char buf[16];
67233
67234 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
67235 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
67236
67237 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
67238 }
67239 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
67240 return 1;
67241
67242 bt = buf->chan->private_data;
67243 - atomic_inc(&bt->dropped);
67244 + atomic_inc_unchecked(&bt->dropped);
67245 return 0;
67246 }
67247
67248 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
67249
67250 bt->dir = dir;
67251 bt->dev = dev;
67252 - atomic_set(&bt->dropped, 0);
67253 + atomic_set_unchecked(&bt->dropped, 0);
67254
67255 ret = -EIO;
67256 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
67257 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
67258 index 25b4f4d..6f4772d 100644
67259 --- a/kernel/trace/ftrace.c
67260 +++ b/kernel/trace/ftrace.c
67261 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
67262 if (unlikely(ftrace_disabled))
67263 return 0;
67264
67265 + ret = ftrace_arch_code_modify_prepare();
67266 + FTRACE_WARN_ON(ret);
67267 + if (ret)
67268 + return 0;
67269 +
67270 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67271 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67272 if (ret) {
67273 ftrace_bug(ret, ip);
67274 - return 0;
67275 }
67276 - return 1;
67277 + return ret ? 0 : 1;
67278 }
67279
67280 /*
67281 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67282
67283 int
67284 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67285 - void *data)
67286 + void *data)
67287 {
67288 struct ftrace_func_probe *entry;
67289 struct ftrace_page *pg;
67290 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67291 index f2bd275..adaf3a2 100644
67292 --- a/kernel/trace/trace.c
67293 +++ b/kernel/trace/trace.c
67294 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67295 };
67296 #endif
67297
67298 -static struct dentry *d_tracer;
67299 -
67300 struct dentry *tracing_init_dentry(void)
67301 {
67302 + static struct dentry *d_tracer;
67303 static int once;
67304
67305 if (d_tracer)
67306 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67307 return d_tracer;
67308 }
67309
67310 -static struct dentry *d_percpu;
67311 -
67312 struct dentry *tracing_dentry_percpu(void)
67313 {
67314 + static struct dentry *d_percpu;
67315 static int once;
67316 struct dentry *d_tracer;
67317
67318 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67319 index c212a7f..7b02394 100644
67320 --- a/kernel/trace/trace_events.c
67321 +++ b/kernel/trace/trace_events.c
67322 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67323 struct ftrace_module_file_ops {
67324 struct list_head list;
67325 struct module *mod;
67326 - struct file_operations id;
67327 - struct file_operations enable;
67328 - struct file_operations format;
67329 - struct file_operations filter;
67330 };
67331
67332 static struct ftrace_module_file_ops *
67333 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67334
67335 file_ops->mod = mod;
67336
67337 - file_ops->id = ftrace_event_id_fops;
67338 - file_ops->id.owner = mod;
67339 -
67340 - file_ops->enable = ftrace_enable_fops;
67341 - file_ops->enable.owner = mod;
67342 -
67343 - file_ops->filter = ftrace_event_filter_fops;
67344 - file_ops->filter.owner = mod;
67345 -
67346 - file_ops->format = ftrace_event_format_fops;
67347 - file_ops->format.owner = mod;
67348 + pax_open_kernel();
67349 + *(void **)&mod->trace_id.owner = mod;
67350 + *(void **)&mod->trace_enable.owner = mod;
67351 + *(void **)&mod->trace_filter.owner = mod;
67352 + *(void **)&mod->trace_format.owner = mod;
67353 + pax_close_kernel();
67354
67355 list_add(&file_ops->list, &ftrace_module_file_list);
67356
67357 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67358
67359 for_each_event(call, start, end) {
67360 __trace_add_event_call(*call, mod,
67361 - &file_ops->id, &file_ops->enable,
67362 - &file_ops->filter, &file_ops->format);
67363 + &mod->trace_id, &mod->trace_enable,
67364 + &mod->trace_filter, &mod->trace_format);
67365 }
67366 }
67367
67368 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67369 index 00d527c..7c5b1a3 100644
67370 --- a/kernel/trace/trace_kprobe.c
67371 +++ b/kernel/trace/trace_kprobe.c
67372 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67373 long ret;
67374 int maxlen = get_rloc_len(*(u32 *)dest);
67375 u8 *dst = get_rloc_data(dest);
67376 - u8 *src = addr;
67377 + const u8 __user *src = (const u8 __force_user *)addr;
67378 mm_segment_t old_fs = get_fs();
67379 if (!maxlen)
67380 return;
67381 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67382 pagefault_disable();
67383 do
67384 ret = __copy_from_user_inatomic(dst++, src++, 1);
67385 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67386 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67387 dst[-1] = '\0';
67388 pagefault_enable();
67389 set_fs(old_fs);
67390 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67391 ((u8 *)get_rloc_data(dest))[0] = '\0';
67392 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67393 } else
67394 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67395 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67396 get_rloc_offs(*(u32 *)dest));
67397 }
67398 /* Return the length of string -- including null terminal byte */
67399 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67400 set_fs(KERNEL_DS);
67401 pagefault_disable();
67402 do {
67403 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67404 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67405 len++;
67406 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67407 pagefault_enable();
67408 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67409 index fd3c8aa..5f324a6 100644
67410 --- a/kernel/trace/trace_mmiotrace.c
67411 +++ b/kernel/trace/trace_mmiotrace.c
67412 @@ -24,7 +24,7 @@ struct header_iter {
67413 static struct trace_array *mmio_trace_array;
67414 static bool overrun_detected;
67415 static unsigned long prev_overruns;
67416 -static atomic_t dropped_count;
67417 +static atomic_unchecked_t dropped_count;
67418
67419 static void mmio_reset_data(struct trace_array *tr)
67420 {
67421 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67422
67423 static unsigned long count_overruns(struct trace_iterator *iter)
67424 {
67425 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67426 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67427 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67428
67429 if (over > prev_overruns)
67430 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67431 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67432 sizeof(*entry), 0, pc);
67433 if (!event) {
67434 - atomic_inc(&dropped_count);
67435 + atomic_inc_unchecked(&dropped_count);
67436 return;
67437 }
67438 entry = ring_buffer_event_data(event);
67439 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67440 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67441 sizeof(*entry), 0, pc);
67442 if (!event) {
67443 - atomic_inc(&dropped_count);
67444 + atomic_inc_unchecked(&dropped_count);
67445 return;
67446 }
67447 entry = ring_buffer_event_data(event);
67448 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67449 index 5199930..26c73a0 100644
67450 --- a/kernel/trace/trace_output.c
67451 +++ b/kernel/trace/trace_output.c
67452 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67453
67454 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67455 if (!IS_ERR(p)) {
67456 - p = mangle_path(s->buffer + s->len, p, "\n");
67457 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67458 if (p) {
67459 s->len = p - s->buffer;
67460 return 1;
67461 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67462 index 77575b3..6e623d1 100644
67463 --- a/kernel/trace/trace_stack.c
67464 +++ b/kernel/trace/trace_stack.c
67465 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67466 return;
67467
67468 /* we do not handle interrupt stacks yet */
67469 - if (!object_is_on_stack(&this_size))
67470 + if (!object_starts_on_stack(&this_size))
67471 return;
67472
67473 local_irq_save(flags);
67474 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67475 index 209b379..7f76423 100644
67476 --- a/kernel/trace/trace_workqueue.c
67477 +++ b/kernel/trace/trace_workqueue.c
67478 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67479 int cpu;
67480 pid_t pid;
67481 /* Can be inserted from interrupt or user context, need to be atomic */
67482 - atomic_t inserted;
67483 + atomic_unchecked_t inserted;
67484 /*
67485 * Don't need to be atomic, works are serialized in a single workqueue thread
67486 * on a single CPU.
67487 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67488 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67489 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67490 if (node->pid == wq_thread->pid) {
67491 - atomic_inc(&node->inserted);
67492 + atomic_inc_unchecked(&node->inserted);
67493 goto found;
67494 }
67495 }
67496 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67497 tsk = get_pid_task(pid, PIDTYPE_PID);
67498 if (tsk) {
67499 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67500 - atomic_read(&cws->inserted), cws->executed,
67501 + atomic_read_unchecked(&cws->inserted), cws->executed,
67502 tsk->comm);
67503 put_task_struct(tsk);
67504 }
67505 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67506 index 82928f5..92da771 100644
67507 --- a/lib/Kconfig.debug
67508 +++ b/lib/Kconfig.debug
67509 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67510 depends on DEBUG_KERNEL
67511 depends on STACKTRACE_SUPPORT
67512 depends on PROC_FS
67513 + depends on !GRKERNSEC_HIDESYM
67514 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67515 select KALLSYMS
67516 select KALLSYMS_ALL
67517 diff --git a/lib/bitmap.c b/lib/bitmap.c
67518 index 0d4a127..33a06c7 100644
67519 --- a/lib/bitmap.c
67520 +++ b/lib/bitmap.c
67521 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67522 {
67523 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67524 u32 chunk;
67525 - const char __user __force *ubuf = (const char __user __force *)buf;
67526 + const char __user *ubuf = (const char __force_user *)buf;
67527
67528 bitmap_zero(maskp, nmaskbits);
67529
67530 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67531 {
67532 if (!access_ok(VERIFY_READ, ubuf, ulen))
67533 return -EFAULT;
67534 - return __bitmap_parse((const char __force *)ubuf,
67535 + return __bitmap_parse((const char __force_kernel *)ubuf,
67536 ulen, 1, maskp, nmaskbits);
67537
67538 }
67539 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67540 {
67541 unsigned a, b;
67542 int c, old_c, totaldigits;
67543 - const char __user __force *ubuf = (const char __user __force *)buf;
67544 + const char __user *ubuf = (const char __force_user *)buf;
67545 int exp_digit, in_range;
67546
67547 totaldigits = c = 0;
67548 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67549 {
67550 if (!access_ok(VERIFY_READ, ubuf, ulen))
67551 return -EFAULT;
67552 - return __bitmap_parselist((const char __force *)ubuf,
67553 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67554 ulen, 1, maskp, nmaskbits);
67555 }
67556 EXPORT_SYMBOL(bitmap_parselist_user);
67557 diff --git a/lib/bug.c b/lib/bug.c
67558 index 1955209..cbbb2ad 100644
67559 --- a/lib/bug.c
67560 +++ b/lib/bug.c
67561 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67562 return BUG_TRAP_TYPE_NONE;
67563
67564 bug = find_bug(bugaddr);
67565 + if (!bug)
67566 + return BUG_TRAP_TYPE_NONE;
67567
67568 file = NULL;
67569 line = 0;
67570 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67571 index a78b7c6..2c73084 100644
67572 --- a/lib/debugobjects.c
67573 +++ b/lib/debugobjects.c
67574 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67575 if (limit > 4)
67576 return;
67577
67578 - is_on_stack = object_is_on_stack(addr);
67579 + is_on_stack = object_starts_on_stack(addr);
67580 if (is_on_stack == onstack)
67581 return;
67582
67583 diff --git a/lib/devres.c b/lib/devres.c
67584 index 7c0e953..f642b5c 100644
67585 --- a/lib/devres.c
67586 +++ b/lib/devres.c
67587 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67588 void devm_iounmap(struct device *dev, void __iomem *addr)
67589 {
67590 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67591 - (void *)addr));
67592 + (void __force *)addr));
67593 iounmap(addr);
67594 }
67595 EXPORT_SYMBOL(devm_iounmap);
67596 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67597 {
67598 ioport_unmap(addr);
67599 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67600 - devm_ioport_map_match, (void *)addr));
67601 + devm_ioport_map_match, (void __force *)addr));
67602 }
67603 EXPORT_SYMBOL(devm_ioport_unmap);
67604
67605 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67606 index fea790a..ebb0e82 100644
67607 --- a/lib/dma-debug.c
67608 +++ b/lib/dma-debug.c
67609 @@ -925,7 +925,7 @@ out:
67610
67611 static void check_for_stack(struct device *dev, void *addr)
67612 {
67613 - if (object_is_on_stack(addr))
67614 + if (object_starts_on_stack(addr))
67615 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67616 "stack [addr=%p]\n", addr);
67617 }
67618 diff --git a/lib/extable.c b/lib/extable.c
67619 index 4cac81e..63e9b8f 100644
67620 --- a/lib/extable.c
67621 +++ b/lib/extable.c
67622 @@ -13,6 +13,7 @@
67623 #include <linux/init.h>
67624 #include <linux/sort.h>
67625 #include <asm/uaccess.h>
67626 +#include <asm/pgtable.h>
67627
67628 #ifndef ARCH_HAS_SORT_EXTABLE
67629 /*
67630 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67631 void sort_extable(struct exception_table_entry *start,
67632 struct exception_table_entry *finish)
67633 {
67634 + pax_open_kernel();
67635 sort(start, finish - start, sizeof(struct exception_table_entry),
67636 cmp_ex, NULL);
67637 + pax_close_kernel();
67638 }
67639
67640 #ifdef CONFIG_MODULES
67641 diff --git a/lib/inflate.c b/lib/inflate.c
67642 index 013a761..c28f3fc 100644
67643 --- a/lib/inflate.c
67644 +++ b/lib/inflate.c
67645 @@ -269,7 +269,7 @@ static void free(void *where)
67646 malloc_ptr = free_mem_ptr;
67647 }
67648 #else
67649 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67650 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67651 #define free(a) kfree(a)
67652 #endif
67653
67654 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67655 index bd2bea9..6b3c95e 100644
67656 --- a/lib/is_single_threaded.c
67657 +++ b/lib/is_single_threaded.c
67658 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67659 struct task_struct *p, *t;
67660 bool ret;
67661
67662 + if (!mm)
67663 + return true;
67664 +
67665 if (atomic_read(&task->signal->live) != 1)
67666 return false;
67667
67668 diff --git a/lib/kref.c b/lib/kref.c
67669 index 3efb882..8492f4c 100644
67670 --- a/lib/kref.c
67671 +++ b/lib/kref.c
67672 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67673 */
67674 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67675 {
67676 - WARN_ON(release == NULL);
67677 + BUG_ON(release == NULL);
67678 WARN_ON(release == (void (*)(struct kref *))kfree);
67679
67680 if (atomic_dec_and_test(&kref->refcount)) {
67681 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67682 index d9df745..e73c2fe 100644
67683 --- a/lib/radix-tree.c
67684 +++ b/lib/radix-tree.c
67685 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67686 int nr;
67687 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67688 };
67689 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67690 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67691
67692 static inline void *ptr_to_indirect(void *ptr)
67693 {
67694 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67695 index 993599e..84dc70e 100644
67696 --- a/lib/vsprintf.c
67697 +++ b/lib/vsprintf.c
67698 @@ -16,6 +16,9 @@
67699 * - scnprintf and vscnprintf
67700 */
67701
67702 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67703 +#define __INCLUDED_BY_HIDESYM 1
67704 +#endif
67705 #include <stdarg.h>
67706 #include <linux/module.h>
67707 #include <linux/types.h>
67708 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67709 char sym[KSYM_SYMBOL_LEN];
67710 if (ext == 'B')
67711 sprint_backtrace(sym, value);
67712 - else if (ext != 'f' && ext != 's')
67713 + else if (ext != 'f' && ext != 's' && ext != 'a')
67714 sprint_symbol(sym, value);
67715 else
67716 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67717 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67718 return string(buf, end, uuid, spec);
67719 }
67720
67721 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67722 +int kptr_restrict __read_mostly = 2;
67723 +#else
67724 int kptr_restrict __read_mostly;
67725 +#endif
67726
67727 /*
67728 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67729 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67730 * - 'S' For symbolic direct pointers with offset
67731 * - 's' For symbolic direct pointers without offset
67732 * - 'B' For backtraced symbolic direct pointers with offset
67733 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67734 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67735 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67736 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67737 * - 'M' For a 6-byte MAC address, it prints the address in the
67738 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67739 {
67740 if (!ptr && *fmt != 'K') {
67741 /*
67742 - * Print (null) with the same width as a pointer so it makes
67743 + * Print (nil) with the same width as a pointer so it makes
67744 * tabular output look nice.
67745 */
67746 if (spec.field_width == -1)
67747 spec.field_width = 2 * sizeof(void *);
67748 - return string(buf, end, "(null)", spec);
67749 + return string(buf, end, "(nil)", spec);
67750 }
67751
67752 switch (*fmt) {
67753 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67754 /* Fallthrough */
67755 case 'S':
67756 case 's':
67757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67758 + break;
67759 +#else
67760 + return symbol_string(buf, end, ptr, spec, *fmt);
67761 +#endif
67762 + case 'A':
67763 + case 'a':
67764 case 'B':
67765 return symbol_string(buf, end, ptr, spec, *fmt);
67766 case 'R':
67767 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67768 typeof(type) value; \
67769 if (sizeof(type) == 8) { \
67770 args = PTR_ALIGN(args, sizeof(u32)); \
67771 - *(u32 *)&value = *(u32 *)args; \
67772 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67773 + *(u32 *)&value = *(const u32 *)args; \
67774 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67775 } else { \
67776 args = PTR_ALIGN(args, sizeof(type)); \
67777 - value = *(typeof(type) *)args; \
67778 + value = *(const typeof(type) *)args; \
67779 } \
67780 args += sizeof(type); \
67781 value; \
67782 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67783 case FORMAT_TYPE_STR: {
67784 const char *str_arg = args;
67785 args += strlen(str_arg) + 1;
67786 - str = string(str, end, (char *)str_arg, spec);
67787 + str = string(str, end, str_arg, spec);
67788 break;
67789 }
67790
67791 diff --git a/localversion-grsec b/localversion-grsec
67792 new file mode 100644
67793 index 0000000..7cd6065
67794 --- /dev/null
67795 +++ b/localversion-grsec
67796 @@ -0,0 +1 @@
67797 +-grsec
67798 diff --git a/mm/Kconfig b/mm/Kconfig
67799 index 011b110..b492af2 100644
67800 --- a/mm/Kconfig
67801 +++ b/mm/Kconfig
67802 @@ -241,10 +241,10 @@ config KSM
67803 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67804
67805 config DEFAULT_MMAP_MIN_ADDR
67806 - int "Low address space to protect from user allocation"
67807 + int "Low address space to protect from user allocation"
67808 depends on MMU
67809 - default 4096
67810 - help
67811 + default 65536
67812 + help
67813 This is the portion of low virtual memory which should be protected
67814 from userspace allocation. Keeping a user from writing to low pages
67815 can help reduce the impact of kernel NULL pointer bugs.
67816 diff --git a/mm/filemap.c b/mm/filemap.c
67817 index 03c5b0e..a01e793 100644
67818 --- a/mm/filemap.c
67819 +++ b/mm/filemap.c
67820 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67821 struct address_space *mapping = file->f_mapping;
67822
67823 if (!mapping->a_ops->readpage)
67824 - return -ENOEXEC;
67825 + return -ENODEV;
67826 file_accessed(file);
67827 vma->vm_ops = &generic_file_vm_ops;
67828 vma->vm_flags |= VM_CAN_NONLINEAR;
67829 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67830 *pos = i_size_read(inode);
67831
67832 if (limit != RLIM_INFINITY) {
67833 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67834 if (*pos >= limit) {
67835 send_sig(SIGXFSZ, current, 0);
67836 return -EFBIG;
67837 diff --git a/mm/fremap.c b/mm/fremap.c
67838 index 9ed4fd4..c42648d 100644
67839 --- a/mm/fremap.c
67840 +++ b/mm/fremap.c
67841 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67842 retry:
67843 vma = find_vma(mm, start);
67844
67845 +#ifdef CONFIG_PAX_SEGMEXEC
67846 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67847 + goto out;
67848 +#endif
67849 +
67850 /*
67851 * Make sure the vma is shared, that it supports prefaulting,
67852 * and that the remapped range is valid and fully within
67853 diff --git a/mm/highmem.c b/mm/highmem.c
67854 index 57d82c6..e9e0552 100644
67855 --- a/mm/highmem.c
67856 +++ b/mm/highmem.c
67857 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67858 * So no dangers, even with speculative execution.
67859 */
67860 page = pte_page(pkmap_page_table[i]);
67861 + pax_open_kernel();
67862 pte_clear(&init_mm, (unsigned long)page_address(page),
67863 &pkmap_page_table[i]);
67864 -
67865 + pax_close_kernel();
67866 set_page_address(page, NULL);
67867 need_flush = 1;
67868 }
67869 @@ -186,9 +187,11 @@ start:
67870 }
67871 }
67872 vaddr = PKMAP_ADDR(last_pkmap_nr);
67873 +
67874 + pax_open_kernel();
67875 set_pte_at(&init_mm, vaddr,
67876 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67877 -
67878 + pax_close_kernel();
67879 pkmap_count[last_pkmap_nr] = 1;
67880 set_page_address(page, (void *)vaddr);
67881
67882 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67883 index 33141f5..e56bef9 100644
67884 --- a/mm/huge_memory.c
67885 +++ b/mm/huge_memory.c
67886 @@ -703,7 +703,7 @@ out:
67887 * run pte_offset_map on the pmd, if an huge pmd could
67888 * materialize from under us from a different thread.
67889 */
67890 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67891 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67892 return VM_FAULT_OOM;
67893 /* if an huge pmd materialized from under us just retry later */
67894 if (unlikely(pmd_trans_huge(*pmd)))
67895 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67896 index 2316840..b418671 100644
67897 --- a/mm/hugetlb.c
67898 +++ b/mm/hugetlb.c
67899 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67900 return 1;
67901 }
67902
67903 +#ifdef CONFIG_PAX_SEGMEXEC
67904 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67905 +{
67906 + struct mm_struct *mm = vma->vm_mm;
67907 + struct vm_area_struct *vma_m;
67908 + unsigned long address_m;
67909 + pte_t *ptep_m;
67910 +
67911 + vma_m = pax_find_mirror_vma(vma);
67912 + if (!vma_m)
67913 + return;
67914 +
67915 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67916 + address_m = address + SEGMEXEC_TASK_SIZE;
67917 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67918 + get_page(page_m);
67919 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67920 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67921 +}
67922 +#endif
67923 +
67924 /*
67925 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67926 */
67927 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67928 make_huge_pte(vma, new_page, 1));
67929 page_remove_rmap(old_page);
67930 hugepage_add_new_anon_rmap(new_page, vma, address);
67931 +
67932 +#ifdef CONFIG_PAX_SEGMEXEC
67933 + pax_mirror_huge_pte(vma, address, new_page);
67934 +#endif
67935 +
67936 /* Make the old page be freed below */
67937 new_page = old_page;
67938 mmu_notifier_invalidate_range_end(mm,
67939 @@ -2601,6 +2627,10 @@ retry:
67940 && (vma->vm_flags & VM_SHARED)));
67941 set_huge_pte_at(mm, address, ptep, new_pte);
67942
67943 +#ifdef CONFIG_PAX_SEGMEXEC
67944 + pax_mirror_huge_pte(vma, address, page);
67945 +#endif
67946 +
67947 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67948 /* Optimization, do the COW without a second fault */
67949 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67950 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67951 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67952 struct hstate *h = hstate_vma(vma);
67953
67954 +#ifdef CONFIG_PAX_SEGMEXEC
67955 + struct vm_area_struct *vma_m;
67956 +#endif
67957 +
67958 ptep = huge_pte_offset(mm, address);
67959 if (ptep) {
67960 entry = huge_ptep_get(ptep);
67961 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67962 VM_FAULT_SET_HINDEX(h - hstates);
67963 }
67964
67965 +#ifdef CONFIG_PAX_SEGMEXEC
67966 + vma_m = pax_find_mirror_vma(vma);
67967 + if (vma_m) {
67968 + unsigned long address_m;
67969 +
67970 + if (vma->vm_start > vma_m->vm_start) {
67971 + address_m = address;
67972 + address -= SEGMEXEC_TASK_SIZE;
67973 + vma = vma_m;
67974 + h = hstate_vma(vma);
67975 + } else
67976 + address_m = address + SEGMEXEC_TASK_SIZE;
67977 +
67978 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67979 + return VM_FAULT_OOM;
67980 + address_m &= HPAGE_MASK;
67981 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67982 + }
67983 +#endif
67984 +
67985 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67986 if (!ptep)
67987 return VM_FAULT_OOM;
67988 diff --git a/mm/internal.h b/mm/internal.h
67989 index 2189af4..f2ca332 100644
67990 --- a/mm/internal.h
67991 +++ b/mm/internal.h
67992 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67993 * in mm/page_alloc.c
67994 */
67995 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67996 +extern void free_compound_page(struct page *page);
67997 extern void prep_compound_page(struct page *page, unsigned long order);
67998 #ifdef CONFIG_MEMORY_FAILURE
67999 extern bool is_free_buddy_page(struct page *page);
68000 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
68001 index f3b2a00..61da94d 100644
68002 --- a/mm/kmemleak.c
68003 +++ b/mm/kmemleak.c
68004 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
68005
68006 for (i = 0; i < object->trace_len; i++) {
68007 void *ptr = (void *)object->trace[i];
68008 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
68009 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
68010 }
68011 }
68012
68013 diff --git a/mm/maccess.c b/mm/maccess.c
68014 index d53adf9..03a24bf 100644
68015 --- a/mm/maccess.c
68016 +++ b/mm/maccess.c
68017 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
68018 set_fs(KERNEL_DS);
68019 pagefault_disable();
68020 ret = __copy_from_user_inatomic(dst,
68021 - (__force const void __user *)src, size);
68022 + (const void __force_user *)src, size);
68023 pagefault_enable();
68024 set_fs(old_fs);
68025
68026 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
68027
68028 set_fs(KERNEL_DS);
68029 pagefault_disable();
68030 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
68031 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
68032 pagefault_enable();
68033 set_fs(old_fs);
68034
68035 diff --git a/mm/madvise.c b/mm/madvise.c
68036 index 74bf193..feb6fd3 100644
68037 --- a/mm/madvise.c
68038 +++ b/mm/madvise.c
68039 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
68040 pgoff_t pgoff;
68041 unsigned long new_flags = vma->vm_flags;
68042
68043 +#ifdef CONFIG_PAX_SEGMEXEC
68044 + struct vm_area_struct *vma_m;
68045 +#endif
68046 +
68047 switch (behavior) {
68048 case MADV_NORMAL:
68049 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
68050 @@ -110,6 +114,13 @@ success:
68051 /*
68052 * vm_flags is protected by the mmap_sem held in write mode.
68053 */
68054 +
68055 +#ifdef CONFIG_PAX_SEGMEXEC
68056 + vma_m = pax_find_mirror_vma(vma);
68057 + if (vma_m)
68058 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
68059 +#endif
68060 +
68061 vma->vm_flags = new_flags;
68062
68063 out:
68064 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68065 struct vm_area_struct ** prev,
68066 unsigned long start, unsigned long end)
68067 {
68068 +
68069 +#ifdef CONFIG_PAX_SEGMEXEC
68070 + struct vm_area_struct *vma_m;
68071 +#endif
68072 +
68073 *prev = vma;
68074 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
68075 return -EINVAL;
68076 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68077 zap_page_range(vma, start, end - start, &details);
68078 } else
68079 zap_page_range(vma, start, end - start, NULL);
68080 +
68081 +#ifdef CONFIG_PAX_SEGMEXEC
68082 + vma_m = pax_find_mirror_vma(vma);
68083 + if (vma_m) {
68084 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
68085 + struct zap_details details = {
68086 + .nonlinear_vma = vma_m,
68087 + .last_index = ULONG_MAX,
68088 + };
68089 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
68090 + } else
68091 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
68092 + }
68093 +#endif
68094 +
68095 return 0;
68096 }
68097
68098 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
68099 if (end < start)
68100 goto out;
68101
68102 +#ifdef CONFIG_PAX_SEGMEXEC
68103 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68104 + if (end > SEGMEXEC_TASK_SIZE)
68105 + goto out;
68106 + } else
68107 +#endif
68108 +
68109 + if (end > TASK_SIZE)
68110 + goto out;
68111 +
68112 error = 0;
68113 if (end == start)
68114 goto out;
68115 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
68116 index 06d3479..0778eef 100644
68117 --- a/mm/memory-failure.c
68118 +++ b/mm/memory-failure.c
68119 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
68120
68121 int sysctl_memory_failure_recovery __read_mostly = 1;
68122
68123 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68124 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68125
68126 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68127
68128 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
68129 si.si_signo = SIGBUS;
68130 si.si_errno = 0;
68131 si.si_code = BUS_MCEERR_AO;
68132 - si.si_addr = (void *)addr;
68133 + si.si_addr = (void __user *)addr;
68134 #ifdef __ARCH_SI_TRAPNO
68135 si.si_trapno = trapno;
68136 #endif
68137 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68138 }
68139
68140 nr_pages = 1 << compound_trans_order(hpage);
68141 - atomic_long_add(nr_pages, &mce_bad_pages);
68142 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68143
68144 /*
68145 * We need/can do nothing about count=0 pages.
68146 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68147 if (!PageHWPoison(hpage)
68148 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68149 || (p != hpage && TestSetPageHWPoison(hpage))) {
68150 - atomic_long_sub(nr_pages, &mce_bad_pages);
68151 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68152 return 0;
68153 }
68154 set_page_hwpoison_huge_page(hpage);
68155 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68156 }
68157 if (hwpoison_filter(p)) {
68158 if (TestClearPageHWPoison(p))
68159 - atomic_long_sub(nr_pages, &mce_bad_pages);
68160 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68161 unlock_page(hpage);
68162 put_page(hpage);
68163 return 0;
68164 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
68165 return 0;
68166 }
68167 if (TestClearPageHWPoison(p))
68168 - atomic_long_sub(nr_pages, &mce_bad_pages);
68169 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68170 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68171 return 0;
68172 }
68173 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
68174 */
68175 if (TestClearPageHWPoison(page)) {
68176 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68177 - atomic_long_sub(nr_pages, &mce_bad_pages);
68178 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68179 freeit = 1;
68180 if (PageHuge(page))
68181 clear_page_hwpoison_huge_page(page);
68182 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
68183 }
68184 done:
68185 if (!PageHWPoison(hpage))
68186 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68187 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68188 set_page_hwpoison_huge_page(hpage);
68189 dequeue_hwpoisoned_huge_page(hpage);
68190 /* keep elevated page count for bad page */
68191 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
68192 return ret;
68193
68194 done:
68195 - atomic_long_add(1, &mce_bad_pages);
68196 + atomic_long_add_unchecked(1, &mce_bad_pages);
68197 SetPageHWPoison(page);
68198 /* keep elevated page count for bad page */
68199 return ret;
68200 diff --git a/mm/memory.c b/mm/memory.c
68201 index 829d437..3d3926a 100644
68202 --- a/mm/memory.c
68203 +++ b/mm/memory.c
68204 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
68205 return;
68206
68207 pmd = pmd_offset(pud, start);
68208 +
68209 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
68210 pud_clear(pud);
68211 pmd_free_tlb(tlb, pmd, start);
68212 +#endif
68213 +
68214 }
68215
68216 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68217 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68218 if (end - 1 > ceiling - 1)
68219 return;
68220
68221 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
68222 pud = pud_offset(pgd, start);
68223 pgd_clear(pgd);
68224 pud_free_tlb(tlb, pud, start);
68225 +#endif
68226 +
68227 }
68228
68229 /*
68230 @@ -1566,12 +1573,6 @@ no_page_table:
68231 return page;
68232 }
68233
68234 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68235 -{
68236 - return stack_guard_page_start(vma, addr) ||
68237 - stack_guard_page_end(vma, addr+PAGE_SIZE);
68238 -}
68239 -
68240 /**
68241 * __get_user_pages() - pin user pages in memory
68242 * @tsk: task_struct of target task
68243 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68244 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
68245 i = 0;
68246
68247 - do {
68248 + while (nr_pages) {
68249 struct vm_area_struct *vma;
68250
68251 - vma = find_extend_vma(mm, start);
68252 + vma = find_vma(mm, start);
68253 if (!vma && in_gate_area(mm, start)) {
68254 unsigned long pg = start & PAGE_MASK;
68255 pgd_t *pgd;
68256 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68257 goto next_page;
68258 }
68259
68260 - if (!vma ||
68261 + if (!vma || start < vma->vm_start ||
68262 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68263 !(vm_flags & vma->vm_flags))
68264 return i ? : -EFAULT;
68265 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68266 int ret;
68267 unsigned int fault_flags = 0;
68268
68269 - /* For mlock, just skip the stack guard page. */
68270 - if (foll_flags & FOLL_MLOCK) {
68271 - if (stack_guard_page(vma, start))
68272 - goto next_page;
68273 - }
68274 if (foll_flags & FOLL_WRITE)
68275 fault_flags |= FAULT_FLAG_WRITE;
68276 if (nonblocking)
68277 @@ -1800,7 +1796,7 @@ next_page:
68278 start += PAGE_SIZE;
68279 nr_pages--;
68280 } while (nr_pages && start < vma->vm_end);
68281 - } while (nr_pages);
68282 + }
68283 return i;
68284 }
68285 EXPORT_SYMBOL(__get_user_pages);
68286 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68287 page_add_file_rmap(page);
68288 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68289
68290 +#ifdef CONFIG_PAX_SEGMEXEC
68291 + pax_mirror_file_pte(vma, addr, page, ptl);
68292 +#endif
68293 +
68294 retval = 0;
68295 pte_unmap_unlock(pte, ptl);
68296 return retval;
68297 @@ -2041,10 +2041,22 @@ out:
68298 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68299 struct page *page)
68300 {
68301 +
68302 +#ifdef CONFIG_PAX_SEGMEXEC
68303 + struct vm_area_struct *vma_m;
68304 +#endif
68305 +
68306 if (addr < vma->vm_start || addr >= vma->vm_end)
68307 return -EFAULT;
68308 if (!page_count(page))
68309 return -EINVAL;
68310 +
68311 +#ifdef CONFIG_PAX_SEGMEXEC
68312 + vma_m = pax_find_mirror_vma(vma);
68313 + if (vma_m)
68314 + vma_m->vm_flags |= VM_INSERTPAGE;
68315 +#endif
68316 +
68317 vma->vm_flags |= VM_INSERTPAGE;
68318 return insert_page(vma, addr, page, vma->vm_page_prot);
68319 }
68320 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68321 unsigned long pfn)
68322 {
68323 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68324 + BUG_ON(vma->vm_mirror);
68325
68326 if (addr < vma->vm_start || addr >= vma->vm_end)
68327 return -EFAULT;
68328 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68329 copy_user_highpage(dst, src, va, vma);
68330 }
68331
68332 +#ifdef CONFIG_PAX_SEGMEXEC
68333 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68334 +{
68335 + struct mm_struct *mm = vma->vm_mm;
68336 + spinlock_t *ptl;
68337 + pte_t *pte, entry;
68338 +
68339 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68340 + entry = *pte;
68341 + if (!pte_present(entry)) {
68342 + if (!pte_none(entry)) {
68343 + BUG_ON(pte_file(entry));
68344 + free_swap_and_cache(pte_to_swp_entry(entry));
68345 + pte_clear_not_present_full(mm, address, pte, 0);
68346 + }
68347 + } else {
68348 + struct page *page;
68349 +
68350 + flush_cache_page(vma, address, pte_pfn(entry));
68351 + entry = ptep_clear_flush(vma, address, pte);
68352 + BUG_ON(pte_dirty(entry));
68353 + page = vm_normal_page(vma, address, entry);
68354 + if (page) {
68355 + update_hiwater_rss(mm);
68356 + if (PageAnon(page))
68357 + dec_mm_counter_fast(mm, MM_ANONPAGES);
68358 + else
68359 + dec_mm_counter_fast(mm, MM_FILEPAGES);
68360 + page_remove_rmap(page);
68361 + page_cache_release(page);
68362 + }
68363 + }
68364 + pte_unmap_unlock(pte, ptl);
68365 +}
68366 +
68367 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68368 + *
68369 + * the ptl of the lower mapped page is held on entry and is not released on exit
68370 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68371 + */
68372 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68373 +{
68374 + struct mm_struct *mm = vma->vm_mm;
68375 + unsigned long address_m;
68376 + spinlock_t *ptl_m;
68377 + struct vm_area_struct *vma_m;
68378 + pmd_t *pmd_m;
68379 + pte_t *pte_m, entry_m;
68380 +
68381 + BUG_ON(!page_m || !PageAnon(page_m));
68382 +
68383 + vma_m = pax_find_mirror_vma(vma);
68384 + if (!vma_m)
68385 + return;
68386 +
68387 + BUG_ON(!PageLocked(page_m));
68388 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68389 + address_m = address + SEGMEXEC_TASK_SIZE;
68390 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68391 + pte_m = pte_offset_map(pmd_m, address_m);
68392 + ptl_m = pte_lockptr(mm, pmd_m);
68393 + if (ptl != ptl_m) {
68394 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68395 + if (!pte_none(*pte_m))
68396 + goto out;
68397 + }
68398 +
68399 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68400 + page_cache_get(page_m);
68401 + page_add_anon_rmap(page_m, vma_m, address_m);
68402 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68403 + set_pte_at(mm, address_m, pte_m, entry_m);
68404 + update_mmu_cache(vma_m, address_m, entry_m);
68405 +out:
68406 + if (ptl != ptl_m)
68407 + spin_unlock(ptl_m);
68408 + pte_unmap(pte_m);
68409 + unlock_page(page_m);
68410 +}
68411 +
68412 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68413 +{
68414 + struct mm_struct *mm = vma->vm_mm;
68415 + unsigned long address_m;
68416 + spinlock_t *ptl_m;
68417 + struct vm_area_struct *vma_m;
68418 + pmd_t *pmd_m;
68419 + pte_t *pte_m, entry_m;
68420 +
68421 + BUG_ON(!page_m || PageAnon(page_m));
68422 +
68423 + vma_m = pax_find_mirror_vma(vma);
68424 + if (!vma_m)
68425 + return;
68426 +
68427 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68428 + address_m = address + SEGMEXEC_TASK_SIZE;
68429 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68430 + pte_m = pte_offset_map(pmd_m, address_m);
68431 + ptl_m = pte_lockptr(mm, pmd_m);
68432 + if (ptl != ptl_m) {
68433 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68434 + if (!pte_none(*pte_m))
68435 + goto out;
68436 + }
68437 +
68438 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68439 + page_cache_get(page_m);
68440 + page_add_file_rmap(page_m);
68441 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68442 + set_pte_at(mm, address_m, pte_m, entry_m);
68443 + update_mmu_cache(vma_m, address_m, entry_m);
68444 +out:
68445 + if (ptl != ptl_m)
68446 + spin_unlock(ptl_m);
68447 + pte_unmap(pte_m);
68448 +}
68449 +
68450 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68451 +{
68452 + struct mm_struct *mm = vma->vm_mm;
68453 + unsigned long address_m;
68454 + spinlock_t *ptl_m;
68455 + struct vm_area_struct *vma_m;
68456 + pmd_t *pmd_m;
68457 + pte_t *pte_m, entry_m;
68458 +
68459 + vma_m = pax_find_mirror_vma(vma);
68460 + if (!vma_m)
68461 + return;
68462 +
68463 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68464 + address_m = address + SEGMEXEC_TASK_SIZE;
68465 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68466 + pte_m = pte_offset_map(pmd_m, address_m);
68467 + ptl_m = pte_lockptr(mm, pmd_m);
68468 + if (ptl != ptl_m) {
68469 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68470 + if (!pte_none(*pte_m))
68471 + goto out;
68472 + }
68473 +
68474 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68475 + set_pte_at(mm, address_m, pte_m, entry_m);
68476 +out:
68477 + if (ptl != ptl_m)
68478 + spin_unlock(ptl_m);
68479 + pte_unmap(pte_m);
68480 +}
68481 +
68482 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68483 +{
68484 + struct page *page_m;
68485 + pte_t entry;
68486 +
68487 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68488 + goto out;
68489 +
68490 + entry = *pte;
68491 + page_m = vm_normal_page(vma, address, entry);
68492 + if (!page_m)
68493 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68494 + else if (PageAnon(page_m)) {
68495 + if (pax_find_mirror_vma(vma)) {
68496 + pte_unmap_unlock(pte, ptl);
68497 + lock_page(page_m);
68498 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68499 + if (pte_same(entry, *pte))
68500 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68501 + else
68502 + unlock_page(page_m);
68503 + }
68504 + } else
68505 + pax_mirror_file_pte(vma, address, page_m, ptl);
68506 +
68507 +out:
68508 + pte_unmap_unlock(pte, ptl);
68509 +}
68510 +#endif
68511 +
68512 /*
68513 * This routine handles present pages, when users try to write
68514 * to a shared page. It is done by copying the page to a new address
68515 @@ -2656,6 +2849,12 @@ gotten:
68516 */
68517 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68518 if (likely(pte_same(*page_table, orig_pte))) {
68519 +
68520 +#ifdef CONFIG_PAX_SEGMEXEC
68521 + if (pax_find_mirror_vma(vma))
68522 + BUG_ON(!trylock_page(new_page));
68523 +#endif
68524 +
68525 if (old_page) {
68526 if (!PageAnon(old_page)) {
68527 dec_mm_counter_fast(mm, MM_FILEPAGES);
68528 @@ -2707,6 +2906,10 @@ gotten:
68529 page_remove_rmap(old_page);
68530 }
68531
68532 +#ifdef CONFIG_PAX_SEGMEXEC
68533 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68534 +#endif
68535 +
68536 /* Free the old page.. */
68537 new_page = old_page;
68538 ret |= VM_FAULT_WRITE;
68539 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68540 swap_free(entry);
68541 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68542 try_to_free_swap(page);
68543 +
68544 +#ifdef CONFIG_PAX_SEGMEXEC
68545 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68546 +#endif
68547 +
68548 unlock_page(page);
68549 if (swapcache) {
68550 /*
68551 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68552
68553 /* No need to invalidate - it was non-present before */
68554 update_mmu_cache(vma, address, page_table);
68555 +
68556 +#ifdef CONFIG_PAX_SEGMEXEC
68557 + pax_mirror_anon_pte(vma, address, page, ptl);
68558 +#endif
68559 +
68560 unlock:
68561 pte_unmap_unlock(page_table, ptl);
68562 out:
68563 @@ -3028,40 +3241,6 @@ out_release:
68564 }
68565
68566 /*
68567 - * This is like a special single-page "expand_{down|up}wards()",
68568 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68569 - * doesn't hit another vma.
68570 - */
68571 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68572 -{
68573 - address &= PAGE_MASK;
68574 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68575 - struct vm_area_struct *prev = vma->vm_prev;
68576 -
68577 - /*
68578 - * Is there a mapping abutting this one below?
68579 - *
68580 - * That's only ok if it's the same stack mapping
68581 - * that has gotten split..
68582 - */
68583 - if (prev && prev->vm_end == address)
68584 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68585 -
68586 - expand_downwards(vma, address - PAGE_SIZE);
68587 - }
68588 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68589 - struct vm_area_struct *next = vma->vm_next;
68590 -
68591 - /* As VM_GROWSDOWN but s/below/above/ */
68592 - if (next && next->vm_start == address + PAGE_SIZE)
68593 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68594 -
68595 - expand_upwards(vma, address + PAGE_SIZE);
68596 - }
68597 - return 0;
68598 -}
68599 -
68600 -/*
68601 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68602 * but allow concurrent faults), and pte mapped but not yet locked.
68603 * We return with mmap_sem still held, but pte unmapped and unlocked.
68604 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68605 unsigned long address, pte_t *page_table, pmd_t *pmd,
68606 unsigned int flags)
68607 {
68608 - struct page *page;
68609 + struct page *page = NULL;
68610 spinlock_t *ptl;
68611 pte_t entry;
68612
68613 - pte_unmap(page_table);
68614 -
68615 - /* Check if we need to add a guard page to the stack */
68616 - if (check_stack_guard_page(vma, address) < 0)
68617 - return VM_FAULT_SIGBUS;
68618 -
68619 - /* Use the zero-page for reads */
68620 if (!(flags & FAULT_FLAG_WRITE)) {
68621 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68622 vma->vm_page_prot));
68623 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68624 + ptl = pte_lockptr(mm, pmd);
68625 + spin_lock(ptl);
68626 if (!pte_none(*page_table))
68627 goto unlock;
68628 goto setpte;
68629 }
68630
68631 /* Allocate our own private page. */
68632 + pte_unmap(page_table);
68633 +
68634 if (unlikely(anon_vma_prepare(vma)))
68635 goto oom;
68636 page = alloc_zeroed_user_highpage_movable(vma, address);
68637 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68638 if (!pte_none(*page_table))
68639 goto release;
68640
68641 +#ifdef CONFIG_PAX_SEGMEXEC
68642 + if (pax_find_mirror_vma(vma))
68643 + BUG_ON(!trylock_page(page));
68644 +#endif
68645 +
68646 inc_mm_counter_fast(mm, MM_ANONPAGES);
68647 page_add_new_anon_rmap(page, vma, address);
68648 setpte:
68649 @@ -3116,6 +3296,12 @@ setpte:
68650
68651 /* No need to invalidate - it was non-present before */
68652 update_mmu_cache(vma, address, page_table);
68653 +
68654 +#ifdef CONFIG_PAX_SEGMEXEC
68655 + if (page)
68656 + pax_mirror_anon_pte(vma, address, page, ptl);
68657 +#endif
68658 +
68659 unlock:
68660 pte_unmap_unlock(page_table, ptl);
68661 return 0;
68662 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68663 */
68664 /* Only go through if we didn't race with anybody else... */
68665 if (likely(pte_same(*page_table, orig_pte))) {
68666 +
68667 +#ifdef CONFIG_PAX_SEGMEXEC
68668 + if (anon && pax_find_mirror_vma(vma))
68669 + BUG_ON(!trylock_page(page));
68670 +#endif
68671 +
68672 flush_icache_page(vma, page);
68673 entry = mk_pte(page, vma->vm_page_prot);
68674 if (flags & FAULT_FLAG_WRITE)
68675 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68676
68677 /* no need to invalidate: a not-present page won't be cached */
68678 update_mmu_cache(vma, address, page_table);
68679 +
68680 +#ifdef CONFIG_PAX_SEGMEXEC
68681 + if (anon)
68682 + pax_mirror_anon_pte(vma, address, page, ptl);
68683 + else
68684 + pax_mirror_file_pte(vma, address, page, ptl);
68685 +#endif
68686 +
68687 } else {
68688 if (cow_page)
68689 mem_cgroup_uncharge_page(cow_page);
68690 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68691 if (flags & FAULT_FLAG_WRITE)
68692 flush_tlb_fix_spurious_fault(vma, address);
68693 }
68694 +
68695 +#ifdef CONFIG_PAX_SEGMEXEC
68696 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68697 + return 0;
68698 +#endif
68699 +
68700 unlock:
68701 pte_unmap_unlock(pte, ptl);
68702 return 0;
68703 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68704 pmd_t *pmd;
68705 pte_t *pte;
68706
68707 +#ifdef CONFIG_PAX_SEGMEXEC
68708 + struct vm_area_struct *vma_m;
68709 +#endif
68710 +
68711 __set_current_state(TASK_RUNNING);
68712
68713 count_vm_event(PGFAULT);
68714 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68715 if (unlikely(is_vm_hugetlb_page(vma)))
68716 return hugetlb_fault(mm, vma, address, flags);
68717
68718 +#ifdef CONFIG_PAX_SEGMEXEC
68719 + vma_m = pax_find_mirror_vma(vma);
68720 + if (vma_m) {
68721 + unsigned long address_m;
68722 + pgd_t *pgd_m;
68723 + pud_t *pud_m;
68724 + pmd_t *pmd_m;
68725 +
68726 + if (vma->vm_start > vma_m->vm_start) {
68727 + address_m = address;
68728 + address -= SEGMEXEC_TASK_SIZE;
68729 + vma = vma_m;
68730 + } else
68731 + address_m = address + SEGMEXEC_TASK_SIZE;
68732 +
68733 + pgd_m = pgd_offset(mm, address_m);
68734 + pud_m = pud_alloc(mm, pgd_m, address_m);
68735 + if (!pud_m)
68736 + return VM_FAULT_OOM;
68737 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68738 + if (!pmd_m)
68739 + return VM_FAULT_OOM;
68740 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68741 + return VM_FAULT_OOM;
68742 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68743 + }
68744 +#endif
68745 +
68746 pgd = pgd_offset(mm, address);
68747 pud = pud_alloc(mm, pgd, address);
68748 if (!pud)
68749 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68750 * run pte_offset_map on the pmd, if an huge pmd could
68751 * materialize from under us from a different thread.
68752 */
68753 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68754 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68755 return VM_FAULT_OOM;
68756 /* if an huge pmd materialized from under us just retry later */
68757 if (unlikely(pmd_trans_huge(*pmd)))
68758 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68759 gate_vma.vm_start = FIXADDR_USER_START;
68760 gate_vma.vm_end = FIXADDR_USER_END;
68761 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68762 - gate_vma.vm_page_prot = __P101;
68763 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68764 /*
68765 * Make sure the vDSO gets into every core dump.
68766 * Dumping its contents makes post-mortem fully interpretable later
68767 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68768 index c3fdbcb..2e8ef90 100644
68769 --- a/mm/mempolicy.c
68770 +++ b/mm/mempolicy.c
68771 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68772 unsigned long vmstart;
68773 unsigned long vmend;
68774
68775 +#ifdef CONFIG_PAX_SEGMEXEC
68776 + struct vm_area_struct *vma_m;
68777 +#endif
68778 +
68779 vma = find_vma_prev(mm, start, &prev);
68780 if (!vma || vma->vm_start > start)
68781 return -EFAULT;
68782 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68783 err = policy_vma(vma, new_pol);
68784 if (err)
68785 goto out;
68786 +
68787 +#ifdef CONFIG_PAX_SEGMEXEC
68788 + vma_m = pax_find_mirror_vma(vma);
68789 + if (vma_m) {
68790 + err = policy_vma(vma_m, new_pol);
68791 + if (err)
68792 + goto out;
68793 + }
68794 +#endif
68795 +
68796 }
68797
68798 out:
68799 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68800
68801 if (end < start)
68802 return -EINVAL;
68803 +
68804 +#ifdef CONFIG_PAX_SEGMEXEC
68805 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68806 + if (end > SEGMEXEC_TASK_SIZE)
68807 + return -EINVAL;
68808 + } else
68809 +#endif
68810 +
68811 + if (end > TASK_SIZE)
68812 + return -EINVAL;
68813 +
68814 if (end == start)
68815 return 0;
68816
68817 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68818 if (!mm)
68819 goto out;
68820
68821 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68822 + if (mm != current->mm &&
68823 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68824 + err = -EPERM;
68825 + goto out;
68826 + }
68827 +#endif
68828 +
68829 /*
68830 * Check if this process has the right to modify the specified
68831 * process. The right exists if the process has administrative
68832 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68833 rcu_read_lock();
68834 tcred = __task_cred(task);
68835 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68836 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68837 - !capable(CAP_SYS_NICE)) {
68838 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68839 rcu_read_unlock();
68840 err = -EPERM;
68841 goto out;
68842 diff --git a/mm/migrate.c b/mm/migrate.c
68843 index 177aca4..ab3a744 100644
68844 --- a/mm/migrate.c
68845 +++ b/mm/migrate.c
68846 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68847 if (!mm)
68848 return -EINVAL;
68849
68850 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68851 + if (mm != current->mm &&
68852 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68853 + err = -EPERM;
68854 + goto out;
68855 + }
68856 +#endif
68857 +
68858 /*
68859 * Check if this process has the right to modify the specified
68860 * process. The right exists if the process has administrative
68861 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68862 rcu_read_lock();
68863 tcred = __task_cred(task);
68864 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68865 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68866 - !capable(CAP_SYS_NICE)) {
68867 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68868 rcu_read_unlock();
68869 err = -EPERM;
68870 goto out;
68871 diff --git a/mm/mlock.c b/mm/mlock.c
68872 index 4f4f53b..9511904 100644
68873 --- a/mm/mlock.c
68874 +++ b/mm/mlock.c
68875 @@ -13,6 +13,7 @@
68876 #include <linux/pagemap.h>
68877 #include <linux/mempolicy.h>
68878 #include <linux/syscalls.h>
68879 +#include <linux/security.h>
68880 #include <linux/sched.h>
68881 #include <linux/export.h>
68882 #include <linux/rmap.h>
68883 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68884 return -EINVAL;
68885 if (end == start)
68886 return 0;
68887 + if (end > TASK_SIZE)
68888 + return -EINVAL;
68889 +
68890 vma = find_vma_prev(current->mm, start, &prev);
68891 if (!vma || vma->vm_start > start)
68892 return -ENOMEM;
68893 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68894 for (nstart = start ; ; ) {
68895 vm_flags_t newflags;
68896
68897 +#ifdef CONFIG_PAX_SEGMEXEC
68898 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68899 + break;
68900 +#endif
68901 +
68902 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68903
68904 newflags = vma->vm_flags | VM_LOCKED;
68905 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68906 lock_limit >>= PAGE_SHIFT;
68907
68908 /* check against resource limits */
68909 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68910 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68911 error = do_mlock(start, len, 1);
68912 up_write(&current->mm->mmap_sem);
68913 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68914 static int do_mlockall(int flags)
68915 {
68916 struct vm_area_struct * vma, * prev = NULL;
68917 - unsigned int def_flags = 0;
68918
68919 if (flags & MCL_FUTURE)
68920 - def_flags = VM_LOCKED;
68921 - current->mm->def_flags = def_flags;
68922 + current->mm->def_flags |= VM_LOCKED;
68923 + else
68924 + current->mm->def_flags &= ~VM_LOCKED;
68925 if (flags == MCL_FUTURE)
68926 goto out;
68927
68928 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68929 vm_flags_t newflags;
68930
68931 +#ifdef CONFIG_PAX_SEGMEXEC
68932 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68933 + break;
68934 +#endif
68935 +
68936 + BUG_ON(vma->vm_end > TASK_SIZE);
68937 newflags = vma->vm_flags | VM_LOCKED;
68938 if (!(flags & MCL_CURRENT))
68939 newflags &= ~VM_LOCKED;
68940 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68941 lock_limit >>= PAGE_SHIFT;
68942
68943 ret = -ENOMEM;
68944 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68945 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68946 capable(CAP_IPC_LOCK))
68947 ret = do_mlockall(flags);
68948 diff --git a/mm/mmap.c b/mm/mmap.c
68949 index eae90af..44552cf 100644
68950 --- a/mm/mmap.c
68951 +++ b/mm/mmap.c
68952 @@ -46,6 +46,16 @@
68953 #define arch_rebalance_pgtables(addr, len) (addr)
68954 #endif
68955
68956 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68957 +{
68958 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68959 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68960 + up_read(&mm->mmap_sem);
68961 + BUG();
68962 + }
68963 +#endif
68964 +}
68965 +
68966 static void unmap_region(struct mm_struct *mm,
68967 struct vm_area_struct *vma, struct vm_area_struct *prev,
68968 unsigned long start, unsigned long end);
68969 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68970 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68971 *
68972 */
68973 -pgprot_t protection_map[16] = {
68974 +pgprot_t protection_map[16] __read_only = {
68975 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68976 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68977 };
68978
68979 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68980 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68981 {
68982 - return __pgprot(pgprot_val(protection_map[vm_flags &
68983 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68984 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68985 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68986 +
68987 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68988 + if (!(__supported_pte_mask & _PAGE_NX) &&
68989 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68990 + (vm_flags & (VM_READ | VM_WRITE)))
68991 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68992 +#endif
68993 +
68994 + return prot;
68995 }
68996 EXPORT_SYMBOL(vm_get_page_prot);
68997
68998 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68999 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
69000 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
69001 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
69002 /*
69003 * Make sure vm_committed_as in one cacheline and not cacheline shared with
69004 * other variables. It can be updated by several CPUs frequently.
69005 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
69006 struct vm_area_struct *next = vma->vm_next;
69007
69008 might_sleep();
69009 + BUG_ON(vma->vm_mirror);
69010 if (vma->vm_ops && vma->vm_ops->close)
69011 vma->vm_ops->close(vma);
69012 if (vma->vm_file) {
69013 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
69014 * not page aligned -Ram Gupta
69015 */
69016 rlim = rlimit(RLIMIT_DATA);
69017 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
69018 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
69019 (mm->end_data - mm->start_data) > rlim)
69020 goto out;
69021 @@ -689,6 +711,12 @@ static int
69022 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
69023 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69024 {
69025 +
69026 +#ifdef CONFIG_PAX_SEGMEXEC
69027 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
69028 + return 0;
69029 +#endif
69030 +
69031 if (is_mergeable_vma(vma, file, vm_flags) &&
69032 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69033 if (vma->vm_pgoff == vm_pgoff)
69034 @@ -708,6 +736,12 @@ static int
69035 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69036 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69037 {
69038 +
69039 +#ifdef CONFIG_PAX_SEGMEXEC
69040 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
69041 + return 0;
69042 +#endif
69043 +
69044 if (is_mergeable_vma(vma, file, vm_flags) &&
69045 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69046 pgoff_t vm_pglen;
69047 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69048 struct vm_area_struct *vma_merge(struct mm_struct *mm,
69049 struct vm_area_struct *prev, unsigned long addr,
69050 unsigned long end, unsigned long vm_flags,
69051 - struct anon_vma *anon_vma, struct file *file,
69052 + struct anon_vma *anon_vma, struct file *file,
69053 pgoff_t pgoff, struct mempolicy *policy)
69054 {
69055 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
69056 struct vm_area_struct *area, *next;
69057 int err;
69058
69059 +#ifdef CONFIG_PAX_SEGMEXEC
69060 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
69061 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
69062 +
69063 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
69064 +#endif
69065 +
69066 /*
69067 * We later require that vma->vm_flags == vm_flags,
69068 * so this tests vma->vm_flags & VM_SPECIAL, too.
69069 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69070 if (next && next->vm_end == end) /* cases 6, 7, 8 */
69071 next = next->vm_next;
69072
69073 +#ifdef CONFIG_PAX_SEGMEXEC
69074 + if (prev)
69075 + prev_m = pax_find_mirror_vma(prev);
69076 + if (area)
69077 + area_m = pax_find_mirror_vma(area);
69078 + if (next)
69079 + next_m = pax_find_mirror_vma(next);
69080 +#endif
69081 +
69082 /*
69083 * Can it merge with the predecessor?
69084 */
69085 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69086 /* cases 1, 6 */
69087 err = vma_adjust(prev, prev->vm_start,
69088 next->vm_end, prev->vm_pgoff, NULL);
69089 - } else /* cases 2, 5, 7 */
69090 +
69091 +#ifdef CONFIG_PAX_SEGMEXEC
69092 + if (!err && prev_m)
69093 + err = vma_adjust(prev_m, prev_m->vm_start,
69094 + next_m->vm_end, prev_m->vm_pgoff, NULL);
69095 +#endif
69096 +
69097 + } else { /* cases 2, 5, 7 */
69098 err = vma_adjust(prev, prev->vm_start,
69099 end, prev->vm_pgoff, NULL);
69100 +
69101 +#ifdef CONFIG_PAX_SEGMEXEC
69102 + if (!err && prev_m)
69103 + err = vma_adjust(prev_m, prev_m->vm_start,
69104 + end_m, prev_m->vm_pgoff, NULL);
69105 +#endif
69106 +
69107 + }
69108 if (err)
69109 return NULL;
69110 khugepaged_enter_vma_merge(prev);
69111 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69112 mpol_equal(policy, vma_policy(next)) &&
69113 can_vma_merge_before(next, vm_flags,
69114 anon_vma, file, pgoff+pglen)) {
69115 - if (prev && addr < prev->vm_end) /* case 4 */
69116 + if (prev && addr < prev->vm_end) { /* case 4 */
69117 err = vma_adjust(prev, prev->vm_start,
69118 addr, prev->vm_pgoff, NULL);
69119 - else /* cases 3, 8 */
69120 +
69121 +#ifdef CONFIG_PAX_SEGMEXEC
69122 + if (!err && prev_m)
69123 + err = vma_adjust(prev_m, prev_m->vm_start,
69124 + addr_m, prev_m->vm_pgoff, NULL);
69125 +#endif
69126 +
69127 + } else { /* cases 3, 8 */
69128 err = vma_adjust(area, addr, next->vm_end,
69129 next->vm_pgoff - pglen, NULL);
69130 +
69131 +#ifdef CONFIG_PAX_SEGMEXEC
69132 + if (!err && area_m)
69133 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
69134 + next_m->vm_pgoff - pglen, NULL);
69135 +#endif
69136 +
69137 + }
69138 if (err)
69139 return NULL;
69140 khugepaged_enter_vma_merge(area);
69141 @@ -921,14 +1001,11 @@ none:
69142 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
69143 struct file *file, long pages)
69144 {
69145 - const unsigned long stack_flags
69146 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
69147 -
69148 if (file) {
69149 mm->shared_vm += pages;
69150 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
69151 mm->exec_vm += pages;
69152 - } else if (flags & stack_flags)
69153 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
69154 mm->stack_vm += pages;
69155 if (flags & (VM_RESERVED|VM_IO))
69156 mm->reserved_vm += pages;
69157 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69158 * (the exception is when the underlying filesystem is noexec
69159 * mounted, in which case we dont add PROT_EXEC.)
69160 */
69161 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69162 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69163 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
69164 prot |= PROT_EXEC;
69165
69166 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69167 /* Obtain the address to map to. we verify (or select) it and ensure
69168 * that it represents a valid section of the address space.
69169 */
69170 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
69171 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
69172 if (addr & ~PAGE_MASK)
69173 return addr;
69174
69175 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69176 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
69177 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
69178
69179 +#ifdef CONFIG_PAX_MPROTECT
69180 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69181 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69182 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
69183 + gr_log_rwxmmap(file);
69184 +
69185 +#ifdef CONFIG_PAX_EMUPLT
69186 + vm_flags &= ~VM_EXEC;
69187 +#else
69188 + return -EPERM;
69189 +#endif
69190 +
69191 + }
69192 +
69193 + if (!(vm_flags & VM_EXEC))
69194 + vm_flags &= ~VM_MAYEXEC;
69195 +#else
69196 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69197 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69198 +#endif
69199 + else
69200 + vm_flags &= ~VM_MAYWRITE;
69201 + }
69202 +#endif
69203 +
69204 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69205 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
69206 + vm_flags &= ~VM_PAGEEXEC;
69207 +#endif
69208 +
69209 if (flags & MAP_LOCKED)
69210 if (!can_do_mlock())
69211 return -EPERM;
69212 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69213 locked += mm->locked_vm;
69214 lock_limit = rlimit(RLIMIT_MEMLOCK);
69215 lock_limit >>= PAGE_SHIFT;
69216 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69217 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
69218 return -EAGAIN;
69219 }
69220 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69221 if (error)
69222 return error;
69223
69224 + if (!gr_acl_handle_mmap(file, prot))
69225 + return -EACCES;
69226 +
69227 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69228 }
69229 EXPORT_SYMBOL(do_mmap_pgoff);
69230 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
69231 vm_flags_t vm_flags = vma->vm_flags;
69232
69233 /* If it was private or non-writable, the write bit is already clear */
69234 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69235 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69236 return 0;
69237
69238 /* The backer wishes to know when pages are first written to? */
69239 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
69240 unsigned long charged = 0;
69241 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69242
69243 +#ifdef CONFIG_PAX_SEGMEXEC
69244 + struct vm_area_struct *vma_m = NULL;
69245 +#endif
69246 +
69247 + /*
69248 + * mm->mmap_sem is required to protect against another thread
69249 + * changing the mappings in case we sleep.
69250 + */
69251 + verify_mm_writelocked(mm);
69252 +
69253 /* Clear old maps */
69254 error = -ENOMEM;
69255 -munmap_back:
69256 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69257 if (vma && vma->vm_start < addr + len) {
69258 if (do_munmap(mm, addr, len))
69259 return -ENOMEM;
69260 - goto munmap_back;
69261 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69262 + BUG_ON(vma && vma->vm_start < addr + len);
69263 }
69264
69265 /* Check against address space limit. */
69266 @@ -1258,6 +1379,16 @@ munmap_back:
69267 goto unacct_error;
69268 }
69269
69270 +#ifdef CONFIG_PAX_SEGMEXEC
69271 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69272 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69273 + if (!vma_m) {
69274 + error = -ENOMEM;
69275 + goto free_vma;
69276 + }
69277 + }
69278 +#endif
69279 +
69280 vma->vm_mm = mm;
69281 vma->vm_start = addr;
69282 vma->vm_end = addr + len;
69283 @@ -1281,6 +1412,19 @@ munmap_back:
69284 error = file->f_op->mmap(file, vma);
69285 if (error)
69286 goto unmap_and_free_vma;
69287 +
69288 +#ifdef CONFIG_PAX_SEGMEXEC
69289 + if (vma_m && (vm_flags & VM_EXECUTABLE))
69290 + added_exe_file_vma(mm);
69291 +#endif
69292 +
69293 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69294 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69295 + vma->vm_flags |= VM_PAGEEXEC;
69296 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69297 + }
69298 +#endif
69299 +
69300 if (vm_flags & VM_EXECUTABLE)
69301 added_exe_file_vma(mm);
69302
69303 @@ -1316,6 +1460,11 @@ munmap_back:
69304 vma_link(mm, vma, prev, rb_link, rb_parent);
69305 file = vma->vm_file;
69306
69307 +#ifdef CONFIG_PAX_SEGMEXEC
69308 + if (vma_m)
69309 + BUG_ON(pax_mirror_vma(vma_m, vma));
69310 +#endif
69311 +
69312 /* Once vma denies write, undo our temporary denial count */
69313 if (correct_wcount)
69314 atomic_inc(&inode->i_writecount);
69315 @@ -1324,6 +1473,7 @@ out:
69316
69317 mm->total_vm += len >> PAGE_SHIFT;
69318 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69319 + track_exec_limit(mm, addr, addr + len, vm_flags);
69320 if (vm_flags & VM_LOCKED) {
69321 if (!mlock_vma_pages_range(vma, addr, addr + len))
69322 mm->locked_vm += (len >> PAGE_SHIFT);
69323 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69324 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69325 charged = 0;
69326 free_vma:
69327 +
69328 +#ifdef CONFIG_PAX_SEGMEXEC
69329 + if (vma_m)
69330 + kmem_cache_free(vm_area_cachep, vma_m);
69331 +#endif
69332 +
69333 kmem_cache_free(vm_area_cachep, vma);
69334 unacct_error:
69335 if (charged)
69336 @@ -1348,6 +1504,44 @@ unacct_error:
69337 return error;
69338 }
69339
69340 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69341 +{
69342 + if (!vma) {
69343 +#ifdef CONFIG_STACK_GROWSUP
69344 + if (addr > sysctl_heap_stack_gap)
69345 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69346 + else
69347 + vma = find_vma(current->mm, 0);
69348 + if (vma && (vma->vm_flags & VM_GROWSUP))
69349 + return false;
69350 +#endif
69351 + return true;
69352 + }
69353 +
69354 + if (addr + len > vma->vm_start)
69355 + return false;
69356 +
69357 + if (vma->vm_flags & VM_GROWSDOWN)
69358 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69359 +#ifdef CONFIG_STACK_GROWSUP
69360 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69361 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69362 +#endif
69363 +
69364 + return true;
69365 +}
69366 +
69367 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69368 +{
69369 + if (vma->vm_start < len)
69370 + return -ENOMEM;
69371 + if (!(vma->vm_flags & VM_GROWSDOWN))
69372 + return vma->vm_start - len;
69373 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69374 + return vma->vm_start - len - sysctl_heap_stack_gap;
69375 + return -ENOMEM;
69376 +}
69377 +
69378 /* Get an address range which is currently unmapped.
69379 * For shmat() with addr=0.
69380 *
69381 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69382 if (flags & MAP_FIXED)
69383 return addr;
69384
69385 +#ifdef CONFIG_PAX_RANDMMAP
69386 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69387 +#endif
69388 +
69389 if (addr) {
69390 addr = PAGE_ALIGN(addr);
69391 - vma = find_vma(mm, addr);
69392 - if (TASK_SIZE - len >= addr &&
69393 - (!vma || addr + len <= vma->vm_start))
69394 - return addr;
69395 + if (TASK_SIZE - len >= addr) {
69396 + vma = find_vma(mm, addr);
69397 + if (check_heap_stack_gap(vma, addr, len))
69398 + return addr;
69399 + }
69400 }
69401 if (len > mm->cached_hole_size) {
69402 - start_addr = addr = mm->free_area_cache;
69403 + start_addr = addr = mm->free_area_cache;
69404 } else {
69405 - start_addr = addr = TASK_UNMAPPED_BASE;
69406 - mm->cached_hole_size = 0;
69407 + start_addr = addr = mm->mmap_base;
69408 + mm->cached_hole_size = 0;
69409 }
69410
69411 full_search:
69412 @@ -1396,34 +1595,40 @@ full_search:
69413 * Start a new search - just in case we missed
69414 * some holes.
69415 */
69416 - if (start_addr != TASK_UNMAPPED_BASE) {
69417 - addr = TASK_UNMAPPED_BASE;
69418 - start_addr = addr;
69419 + if (start_addr != mm->mmap_base) {
69420 + start_addr = addr = mm->mmap_base;
69421 mm->cached_hole_size = 0;
69422 goto full_search;
69423 }
69424 return -ENOMEM;
69425 }
69426 - if (!vma || addr + len <= vma->vm_start) {
69427 - /*
69428 - * Remember the place where we stopped the search:
69429 - */
69430 - mm->free_area_cache = addr + len;
69431 - return addr;
69432 - }
69433 + if (check_heap_stack_gap(vma, addr, len))
69434 + break;
69435 if (addr + mm->cached_hole_size < vma->vm_start)
69436 mm->cached_hole_size = vma->vm_start - addr;
69437 addr = vma->vm_end;
69438 }
69439 +
69440 + /*
69441 + * Remember the place where we stopped the search:
69442 + */
69443 + mm->free_area_cache = addr + len;
69444 + return addr;
69445 }
69446 #endif
69447
69448 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69449 {
69450 +
69451 +#ifdef CONFIG_PAX_SEGMEXEC
69452 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69453 + return;
69454 +#endif
69455 +
69456 /*
69457 * Is this a new hole at the lowest possible address?
69458 */
69459 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69460 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69461 mm->free_area_cache = addr;
69462 mm->cached_hole_size = ~0UL;
69463 }
69464 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69465 {
69466 struct vm_area_struct *vma;
69467 struct mm_struct *mm = current->mm;
69468 - unsigned long addr = addr0;
69469 + unsigned long base = mm->mmap_base, addr = addr0;
69470
69471 /* requested length too big for entire address space */
69472 if (len > TASK_SIZE)
69473 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69474 if (flags & MAP_FIXED)
69475 return addr;
69476
69477 +#ifdef CONFIG_PAX_RANDMMAP
69478 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69479 +#endif
69480 +
69481 /* requesting a specific address */
69482 if (addr) {
69483 addr = PAGE_ALIGN(addr);
69484 - vma = find_vma(mm, addr);
69485 - if (TASK_SIZE - len >= addr &&
69486 - (!vma || addr + len <= vma->vm_start))
69487 - return addr;
69488 + if (TASK_SIZE - len >= addr) {
69489 + vma = find_vma(mm, addr);
69490 + if (check_heap_stack_gap(vma, addr, len))
69491 + return addr;
69492 + }
69493 }
69494
69495 /* check if free_area_cache is useful for us */
69496 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69497 /* make sure it can fit in the remaining address space */
69498 if (addr > len) {
69499 vma = find_vma(mm, addr-len);
69500 - if (!vma || addr <= vma->vm_start)
69501 + if (check_heap_stack_gap(vma, addr - len, len))
69502 /* remember the address as a hint for next time */
69503 return (mm->free_area_cache = addr-len);
69504 }
69505 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69506 * return with success:
69507 */
69508 vma = find_vma(mm, addr);
69509 - if (!vma || addr+len <= vma->vm_start)
69510 + if (check_heap_stack_gap(vma, addr, len))
69511 /* remember the address as a hint for next time */
69512 return (mm->free_area_cache = addr);
69513
69514 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69515 mm->cached_hole_size = vma->vm_start - addr;
69516
69517 /* try just below the current vma->vm_start */
69518 - addr = vma->vm_start-len;
69519 - } while (len < vma->vm_start);
69520 + addr = skip_heap_stack_gap(vma, len);
69521 + } while (!IS_ERR_VALUE(addr));
69522
69523 bottomup:
69524 /*
69525 @@ -1507,13 +1717,21 @@ bottomup:
69526 * can happen with large stack limits and large mmap()
69527 * allocations.
69528 */
69529 + mm->mmap_base = TASK_UNMAPPED_BASE;
69530 +
69531 +#ifdef CONFIG_PAX_RANDMMAP
69532 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69533 + mm->mmap_base += mm->delta_mmap;
69534 +#endif
69535 +
69536 + mm->free_area_cache = mm->mmap_base;
69537 mm->cached_hole_size = ~0UL;
69538 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69539 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69540 /*
69541 * Restore the topdown base:
69542 */
69543 - mm->free_area_cache = mm->mmap_base;
69544 + mm->mmap_base = base;
69545 + mm->free_area_cache = base;
69546 mm->cached_hole_size = ~0UL;
69547
69548 return addr;
69549 @@ -1522,6 +1740,12 @@ bottomup:
69550
69551 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69552 {
69553 +
69554 +#ifdef CONFIG_PAX_SEGMEXEC
69555 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69556 + return;
69557 +#endif
69558 +
69559 /*
69560 * Is this a new hole at the highest possible address?
69561 */
69562 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69563 mm->free_area_cache = addr;
69564
69565 /* dont allow allocations above current base */
69566 - if (mm->free_area_cache > mm->mmap_base)
69567 + if (mm->free_area_cache > mm->mmap_base) {
69568 mm->free_area_cache = mm->mmap_base;
69569 + mm->cached_hole_size = ~0UL;
69570 + }
69571 }
69572
69573 unsigned long
69574 @@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69575
69576 EXPORT_SYMBOL(find_vma);
69577
69578 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69579 +/*
69580 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69581 + * Note: pprev is set to NULL when return value is NULL.
69582 + */
69583 struct vm_area_struct *
69584 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69585 struct vm_area_struct **pprev)
69586 {
69587 - struct vm_area_struct *vma = NULL, *prev = NULL;
69588 - struct rb_node *rb_node;
69589 - if (!mm)
69590 - goto out;
69591 + struct vm_area_struct *vma;
69592
69593 - /* Guard against addr being lower than the first VMA */
69594 - vma = mm->mmap;
69595 + vma = find_vma(mm, addr);
69596 + *pprev = vma ? vma->vm_prev : NULL;
69597 + return vma;
69598 +}
69599
69600 - /* Go through the RB tree quickly. */
69601 - rb_node = mm->mm_rb.rb_node;
69602 +#ifdef CONFIG_PAX_SEGMEXEC
69603 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69604 +{
69605 + struct vm_area_struct *vma_m;
69606
69607 - while (rb_node) {
69608 - struct vm_area_struct *vma_tmp;
69609 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69610 -
69611 - if (addr < vma_tmp->vm_end) {
69612 - rb_node = rb_node->rb_left;
69613 - } else {
69614 - prev = vma_tmp;
69615 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69616 - break;
69617 - rb_node = rb_node->rb_right;
69618 - }
69619 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69620 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69621 + BUG_ON(vma->vm_mirror);
69622 + return NULL;
69623 }
69624 -
69625 -out:
69626 - *pprev = prev;
69627 - return prev ? prev->vm_next : vma;
69628 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69629 + vma_m = vma->vm_mirror;
69630 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69631 + BUG_ON(vma->vm_file != vma_m->vm_file);
69632 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69633 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69634 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69635 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69636 + return vma_m;
69637 }
69638 +#endif
69639
69640 /*
69641 * Verify that the stack growth is acceptable and
69642 @@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69643 return -ENOMEM;
69644
69645 /* Stack limit test */
69646 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69647 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69648 return -ENOMEM;
69649
69650 @@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69651 locked = mm->locked_vm + grow;
69652 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69653 limit >>= PAGE_SHIFT;
69654 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69655 if (locked > limit && !capable(CAP_IPC_LOCK))
69656 return -ENOMEM;
69657 }
69658 @@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69659 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69660 * vma is the last one with address > vma->vm_end. Have to extend vma.
69661 */
69662 +#ifndef CONFIG_IA64
69663 +static
69664 +#endif
69665 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69666 {
69667 int error;
69668 + bool locknext;
69669
69670 if (!(vma->vm_flags & VM_GROWSUP))
69671 return -EFAULT;
69672
69673 + /* Also guard against wrapping around to address 0. */
69674 + if (address < PAGE_ALIGN(address+1))
69675 + address = PAGE_ALIGN(address+1);
69676 + else
69677 + return -ENOMEM;
69678 +
69679 /*
69680 * We must make sure the anon_vma is allocated
69681 * so that the anon_vma locking is not a noop.
69682 */
69683 if (unlikely(anon_vma_prepare(vma)))
69684 return -ENOMEM;
69685 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69686 + if (locknext && anon_vma_prepare(vma->vm_next))
69687 + return -ENOMEM;
69688 vma_lock_anon_vma(vma);
69689 + if (locknext)
69690 + vma_lock_anon_vma(vma->vm_next);
69691
69692 /*
69693 * vma->vm_start/vm_end cannot change under us because the caller
69694 * is required to hold the mmap_sem in read mode. We need the
69695 - * anon_vma lock to serialize against concurrent expand_stacks.
69696 - * Also guard against wrapping around to address 0.
69697 + * anon_vma locks to serialize against concurrent expand_stacks
69698 + * and expand_upwards.
69699 */
69700 - if (address < PAGE_ALIGN(address+4))
69701 - address = PAGE_ALIGN(address+4);
69702 - else {
69703 - vma_unlock_anon_vma(vma);
69704 - return -ENOMEM;
69705 - }
69706 error = 0;
69707
69708 /* Somebody else might have raced and expanded it already */
69709 - if (address > vma->vm_end) {
69710 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69711 + error = -ENOMEM;
69712 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69713 unsigned long size, grow;
69714
69715 size = address - vma->vm_start;
69716 @@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69717 }
69718 }
69719 }
69720 + if (locknext)
69721 + vma_unlock_anon_vma(vma->vm_next);
69722 vma_unlock_anon_vma(vma);
69723 khugepaged_enter_vma_merge(vma);
69724 return error;
69725 @@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69726 unsigned long address)
69727 {
69728 int error;
69729 + bool lockprev = false;
69730 + struct vm_area_struct *prev;
69731
69732 /*
69733 * We must make sure the anon_vma is allocated
69734 @@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69735 if (error)
69736 return error;
69737
69738 + prev = vma->vm_prev;
69739 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69740 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69741 +#endif
69742 + if (lockprev && anon_vma_prepare(prev))
69743 + return -ENOMEM;
69744 + if (lockprev)
69745 + vma_lock_anon_vma(prev);
69746 +
69747 vma_lock_anon_vma(vma);
69748
69749 /*
69750 @@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69751 */
69752
69753 /* Somebody else might have raced and expanded it already */
69754 - if (address < vma->vm_start) {
69755 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69756 + error = -ENOMEM;
69757 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69758 unsigned long size, grow;
69759
69760 +#ifdef CONFIG_PAX_SEGMEXEC
69761 + struct vm_area_struct *vma_m;
69762 +
69763 + vma_m = pax_find_mirror_vma(vma);
69764 +#endif
69765 +
69766 size = vma->vm_end - address;
69767 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69768
69769 @@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69770 if (!error) {
69771 vma->vm_start = address;
69772 vma->vm_pgoff -= grow;
69773 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69774 +
69775 +#ifdef CONFIG_PAX_SEGMEXEC
69776 + if (vma_m) {
69777 + vma_m->vm_start -= grow << PAGE_SHIFT;
69778 + vma_m->vm_pgoff -= grow;
69779 + }
69780 +#endif
69781 +
69782 perf_event_mmap(vma);
69783 }
69784 }
69785 }
69786 vma_unlock_anon_vma(vma);
69787 + if (lockprev)
69788 + vma_unlock_anon_vma(prev);
69789 khugepaged_enter_vma_merge(vma);
69790 return error;
69791 }
69792 @@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69793 do {
69794 long nrpages = vma_pages(vma);
69795
69796 +#ifdef CONFIG_PAX_SEGMEXEC
69797 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69798 + vma = remove_vma(vma);
69799 + continue;
69800 + }
69801 +#endif
69802 +
69803 mm->total_vm -= nrpages;
69804 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69805 vma = remove_vma(vma);
69806 @@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69807 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69808 vma->vm_prev = NULL;
69809 do {
69810 +
69811 +#ifdef CONFIG_PAX_SEGMEXEC
69812 + if (vma->vm_mirror) {
69813 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69814 + vma->vm_mirror->vm_mirror = NULL;
69815 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69816 + vma->vm_mirror = NULL;
69817 + }
69818 +#endif
69819 +
69820 rb_erase(&vma->vm_rb, &mm->mm_rb);
69821 mm->map_count--;
69822 tail_vma = vma;
69823 @@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69824 struct vm_area_struct *new;
69825 int err = -ENOMEM;
69826
69827 +#ifdef CONFIG_PAX_SEGMEXEC
69828 + struct vm_area_struct *vma_m, *new_m = NULL;
69829 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69830 +#endif
69831 +
69832 if (is_vm_hugetlb_page(vma) && (addr &
69833 ~(huge_page_mask(hstate_vma(vma)))))
69834 return -EINVAL;
69835
69836 +#ifdef CONFIG_PAX_SEGMEXEC
69837 + vma_m = pax_find_mirror_vma(vma);
69838 +#endif
69839 +
69840 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69841 if (!new)
69842 goto out_err;
69843
69844 +#ifdef CONFIG_PAX_SEGMEXEC
69845 + if (vma_m) {
69846 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69847 + if (!new_m) {
69848 + kmem_cache_free(vm_area_cachep, new);
69849 + goto out_err;
69850 + }
69851 + }
69852 +#endif
69853 +
69854 /* most fields are the same, copy all, and then fixup */
69855 *new = *vma;
69856
69857 @@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69858 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69859 }
69860
69861 +#ifdef CONFIG_PAX_SEGMEXEC
69862 + if (vma_m) {
69863 + *new_m = *vma_m;
69864 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69865 + new_m->vm_mirror = new;
69866 + new->vm_mirror = new_m;
69867 +
69868 + if (new_below)
69869 + new_m->vm_end = addr_m;
69870 + else {
69871 + new_m->vm_start = addr_m;
69872 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69873 + }
69874 + }
69875 +#endif
69876 +
69877 pol = mpol_dup(vma_policy(vma));
69878 if (IS_ERR(pol)) {
69879 err = PTR_ERR(pol);
69880 @@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69881 else
69882 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69883
69884 +#ifdef CONFIG_PAX_SEGMEXEC
69885 + if (!err && vma_m) {
69886 + if (anon_vma_clone(new_m, vma_m))
69887 + goto out_free_mpol;
69888 +
69889 + mpol_get(pol);
69890 + vma_set_policy(new_m, pol);
69891 +
69892 + if (new_m->vm_file) {
69893 + get_file(new_m->vm_file);
69894 + if (vma_m->vm_flags & VM_EXECUTABLE)
69895 + added_exe_file_vma(mm);
69896 + }
69897 +
69898 + if (new_m->vm_ops && new_m->vm_ops->open)
69899 + new_m->vm_ops->open(new_m);
69900 +
69901 + if (new_below)
69902 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69903 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69904 + else
69905 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69906 +
69907 + if (err) {
69908 + if (new_m->vm_ops && new_m->vm_ops->close)
69909 + new_m->vm_ops->close(new_m);
69910 + if (new_m->vm_file) {
69911 + if (vma_m->vm_flags & VM_EXECUTABLE)
69912 + removed_exe_file_vma(mm);
69913 + fput(new_m->vm_file);
69914 + }
69915 + mpol_put(pol);
69916 + }
69917 + }
69918 +#endif
69919 +
69920 /* Success. */
69921 if (!err)
69922 return 0;
69923 @@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69924 removed_exe_file_vma(mm);
69925 fput(new->vm_file);
69926 }
69927 - unlink_anon_vmas(new);
69928 out_free_mpol:
69929 mpol_put(pol);
69930 out_free_vma:
69931 +
69932 +#ifdef CONFIG_PAX_SEGMEXEC
69933 + if (new_m) {
69934 + unlink_anon_vmas(new_m);
69935 + kmem_cache_free(vm_area_cachep, new_m);
69936 + }
69937 +#endif
69938 +
69939 + unlink_anon_vmas(new);
69940 kmem_cache_free(vm_area_cachep, new);
69941 out_err:
69942 return err;
69943 @@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69944 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69945 unsigned long addr, int new_below)
69946 {
69947 +
69948 +#ifdef CONFIG_PAX_SEGMEXEC
69949 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69950 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69951 + if (mm->map_count >= sysctl_max_map_count-1)
69952 + return -ENOMEM;
69953 + } else
69954 +#endif
69955 +
69956 if (mm->map_count >= sysctl_max_map_count)
69957 return -ENOMEM;
69958
69959 @@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69960 * work. This now handles partial unmappings.
69961 * Jeremy Fitzhardinge <jeremy@goop.org>
69962 */
69963 +#ifdef CONFIG_PAX_SEGMEXEC
69964 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69965 {
69966 + int ret = __do_munmap(mm, start, len);
69967 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69968 + return ret;
69969 +
69970 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69971 +}
69972 +
69973 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69974 +#else
69975 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69976 +#endif
69977 +{
69978 unsigned long end;
69979 struct vm_area_struct *vma, *prev, *last;
69980
69981 + /*
69982 + * mm->mmap_sem is required to protect against another thread
69983 + * changing the mappings in case we sleep.
69984 + */
69985 + verify_mm_writelocked(mm);
69986 +
69987 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69988 return -EINVAL;
69989
69990 @@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69991 /* Fix up all other VM information */
69992 remove_vma_list(mm, vma);
69993
69994 + track_exec_limit(mm, start, end, 0UL);
69995 +
69996 return 0;
69997 }
69998
69999 @@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
70000
70001 profile_munmap(addr);
70002
70003 +#ifdef CONFIG_PAX_SEGMEXEC
70004 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
70005 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
70006 + return -EINVAL;
70007 +#endif
70008 +
70009 down_write(&mm->mmap_sem);
70010 ret = do_munmap(mm, addr, len);
70011 up_write(&mm->mmap_sem);
70012 return ret;
70013 }
70014
70015 -static inline void verify_mm_writelocked(struct mm_struct *mm)
70016 -{
70017 -#ifdef CONFIG_DEBUG_VM
70018 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70019 - WARN_ON(1);
70020 - up_read(&mm->mmap_sem);
70021 - }
70022 -#endif
70023 -}
70024 -
70025 /*
70026 * this is really a simplified "do_mmap". it only handles
70027 * anonymous maps. eventually we may be able to do some
70028 @@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70029 struct rb_node ** rb_link, * rb_parent;
70030 pgoff_t pgoff = addr >> PAGE_SHIFT;
70031 int error;
70032 + unsigned long charged;
70033
70034 len = PAGE_ALIGN(len);
70035 if (!len)
70036 @@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70037
70038 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
70039
70040 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
70041 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
70042 + flags &= ~VM_EXEC;
70043 +
70044 +#ifdef CONFIG_PAX_MPROTECT
70045 + if (mm->pax_flags & MF_PAX_MPROTECT)
70046 + flags &= ~VM_MAYEXEC;
70047 +#endif
70048 +
70049 + }
70050 +#endif
70051 +
70052 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
70053 if (error & ~PAGE_MASK)
70054 return error;
70055
70056 + charged = len >> PAGE_SHIFT;
70057 +
70058 /*
70059 * mlock MCL_FUTURE?
70060 */
70061 if (mm->def_flags & VM_LOCKED) {
70062 unsigned long locked, lock_limit;
70063 - locked = len >> PAGE_SHIFT;
70064 + locked = charged;
70065 locked += mm->locked_vm;
70066 lock_limit = rlimit(RLIMIT_MEMLOCK);
70067 lock_limit >>= PAGE_SHIFT;
70068 @@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70069 /*
70070 * Clear old maps. this also does some error checking for us
70071 */
70072 - munmap_back:
70073 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70074 if (vma && vma->vm_start < addr + len) {
70075 if (do_munmap(mm, addr, len))
70076 return -ENOMEM;
70077 - goto munmap_back;
70078 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70079 + BUG_ON(vma && vma->vm_start < addr + len);
70080 }
70081
70082 /* Check against address space limits *after* clearing old maps... */
70083 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
70084 + if (!may_expand_vm(mm, charged))
70085 return -ENOMEM;
70086
70087 if (mm->map_count > sysctl_max_map_count)
70088 return -ENOMEM;
70089
70090 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
70091 + if (security_vm_enough_memory(charged))
70092 return -ENOMEM;
70093
70094 /* Can we just expand an old private anonymous mapping? */
70095 @@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70096 */
70097 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70098 if (!vma) {
70099 - vm_unacct_memory(len >> PAGE_SHIFT);
70100 + vm_unacct_memory(charged);
70101 return -ENOMEM;
70102 }
70103
70104 @@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70105 vma_link(mm, vma, prev, rb_link, rb_parent);
70106 out:
70107 perf_event_mmap(vma);
70108 - mm->total_vm += len >> PAGE_SHIFT;
70109 + mm->total_vm += charged;
70110 if (flags & VM_LOCKED) {
70111 if (!mlock_vma_pages_range(vma, addr, addr + len))
70112 - mm->locked_vm += (len >> PAGE_SHIFT);
70113 + mm->locked_vm += charged;
70114 }
70115 + track_exec_limit(mm, addr, addr + len, flags);
70116 return addr;
70117 }
70118
70119 @@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
70120 * Walk the list again, actually closing and freeing it,
70121 * with preemption enabled, without holding any MM locks.
70122 */
70123 - while (vma)
70124 + while (vma) {
70125 + vma->vm_mirror = NULL;
70126 vma = remove_vma(vma);
70127 + }
70128
70129 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
70130 }
70131 @@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70132 struct vm_area_struct * __vma, * prev;
70133 struct rb_node ** rb_link, * rb_parent;
70134
70135 +#ifdef CONFIG_PAX_SEGMEXEC
70136 + struct vm_area_struct *vma_m = NULL;
70137 +#endif
70138 +
70139 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
70140 + return -EPERM;
70141 +
70142 /*
70143 * The vm_pgoff of a purely anonymous vma should be irrelevant
70144 * until its first write fault, when page's anon_vma and index
70145 @@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70146 if ((vma->vm_flags & VM_ACCOUNT) &&
70147 security_vm_enough_memory_mm(mm, vma_pages(vma)))
70148 return -ENOMEM;
70149 +
70150 +#ifdef CONFIG_PAX_SEGMEXEC
70151 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
70152 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70153 + if (!vma_m)
70154 + return -ENOMEM;
70155 + }
70156 +#endif
70157 +
70158 vma_link(mm, vma, prev, rb_link, rb_parent);
70159 +
70160 +#ifdef CONFIG_PAX_SEGMEXEC
70161 + if (vma_m)
70162 + BUG_ON(pax_mirror_vma(vma_m, vma));
70163 +#endif
70164 +
70165 return 0;
70166 }
70167
70168 @@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70169 struct rb_node **rb_link, *rb_parent;
70170 struct mempolicy *pol;
70171
70172 + BUG_ON(vma->vm_mirror);
70173 +
70174 /*
70175 * If anonymous vma has not yet been faulted, update new pgoff
70176 * to match new location, to increase its chance of merging.
70177 @@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70178 return NULL;
70179 }
70180
70181 +#ifdef CONFIG_PAX_SEGMEXEC
70182 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
70183 +{
70184 + struct vm_area_struct *prev_m;
70185 + struct rb_node **rb_link_m, *rb_parent_m;
70186 + struct mempolicy *pol_m;
70187 +
70188 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
70189 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
70190 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
70191 + *vma_m = *vma;
70192 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
70193 + if (anon_vma_clone(vma_m, vma))
70194 + return -ENOMEM;
70195 + pol_m = vma_policy(vma_m);
70196 + mpol_get(pol_m);
70197 + vma_set_policy(vma_m, pol_m);
70198 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
70199 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
70200 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
70201 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
70202 + if (vma_m->vm_file)
70203 + get_file(vma_m->vm_file);
70204 + if (vma_m->vm_ops && vma_m->vm_ops->open)
70205 + vma_m->vm_ops->open(vma_m);
70206 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
70207 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
70208 + vma_m->vm_mirror = vma;
70209 + vma->vm_mirror = vma_m;
70210 + return 0;
70211 +}
70212 +#endif
70213 +
70214 /*
70215 * Return true if the calling process may expand its vm space by the passed
70216 * number of pages
70217 @@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
70218 unsigned long lim;
70219
70220 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
70221 -
70222 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
70223 if (cur + npages > lim)
70224 return 0;
70225 return 1;
70226 @@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
70227 vma->vm_start = addr;
70228 vma->vm_end = addr + len;
70229
70230 +#ifdef CONFIG_PAX_MPROTECT
70231 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70232 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70233 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
70234 + return -EPERM;
70235 + if (!(vm_flags & VM_EXEC))
70236 + vm_flags &= ~VM_MAYEXEC;
70237 +#else
70238 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70239 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70240 +#endif
70241 + else
70242 + vm_flags &= ~VM_MAYWRITE;
70243 + }
70244 +#endif
70245 +
70246 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
70247 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70248
70249 diff --git a/mm/mprotect.c b/mm/mprotect.c
70250 index 5a688a2..27e031c 100644
70251 --- a/mm/mprotect.c
70252 +++ b/mm/mprotect.c
70253 @@ -23,10 +23,16 @@
70254 #include <linux/mmu_notifier.h>
70255 #include <linux/migrate.h>
70256 #include <linux/perf_event.h>
70257 +
70258 +#ifdef CONFIG_PAX_MPROTECT
70259 +#include <linux/elf.h>
70260 +#endif
70261 +
70262 #include <asm/uaccess.h>
70263 #include <asm/pgtable.h>
70264 #include <asm/cacheflush.h>
70265 #include <asm/tlbflush.h>
70266 +#include <asm/mmu_context.h>
70267
70268 #ifndef pgprot_modify
70269 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70270 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70271 flush_tlb_range(vma, start, end);
70272 }
70273
70274 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70275 +/* called while holding the mmap semaphor for writing except stack expansion */
70276 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70277 +{
70278 + unsigned long oldlimit, newlimit = 0UL;
70279 +
70280 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70281 + return;
70282 +
70283 + spin_lock(&mm->page_table_lock);
70284 + oldlimit = mm->context.user_cs_limit;
70285 + if ((prot & VM_EXEC) && oldlimit < end)
70286 + /* USER_CS limit moved up */
70287 + newlimit = end;
70288 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70289 + /* USER_CS limit moved down */
70290 + newlimit = start;
70291 +
70292 + if (newlimit) {
70293 + mm->context.user_cs_limit = newlimit;
70294 +
70295 +#ifdef CONFIG_SMP
70296 + wmb();
70297 + cpus_clear(mm->context.cpu_user_cs_mask);
70298 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70299 +#endif
70300 +
70301 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70302 + }
70303 + spin_unlock(&mm->page_table_lock);
70304 + if (newlimit == end) {
70305 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
70306 +
70307 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
70308 + if (is_vm_hugetlb_page(vma))
70309 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70310 + else
70311 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70312 + }
70313 +}
70314 +#endif
70315 +
70316 int
70317 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70318 unsigned long start, unsigned long end, unsigned long newflags)
70319 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70320 int error;
70321 int dirty_accountable = 0;
70322
70323 +#ifdef CONFIG_PAX_SEGMEXEC
70324 + struct vm_area_struct *vma_m = NULL;
70325 + unsigned long start_m, end_m;
70326 +
70327 + start_m = start + SEGMEXEC_TASK_SIZE;
70328 + end_m = end + SEGMEXEC_TASK_SIZE;
70329 +#endif
70330 +
70331 if (newflags == oldflags) {
70332 *pprev = vma;
70333 return 0;
70334 }
70335
70336 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70337 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70338 +
70339 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70340 + return -ENOMEM;
70341 +
70342 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70343 + return -ENOMEM;
70344 + }
70345 +
70346 /*
70347 * If we make a private mapping writable we increase our commit;
70348 * but (without finer accounting) cannot reduce our commit if we
70349 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70350 }
70351 }
70352
70353 +#ifdef CONFIG_PAX_SEGMEXEC
70354 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70355 + if (start != vma->vm_start) {
70356 + error = split_vma(mm, vma, start, 1);
70357 + if (error)
70358 + goto fail;
70359 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70360 + *pprev = (*pprev)->vm_next;
70361 + }
70362 +
70363 + if (end != vma->vm_end) {
70364 + error = split_vma(mm, vma, end, 0);
70365 + if (error)
70366 + goto fail;
70367 + }
70368 +
70369 + if (pax_find_mirror_vma(vma)) {
70370 + error = __do_munmap(mm, start_m, end_m - start_m);
70371 + if (error)
70372 + goto fail;
70373 + } else {
70374 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70375 + if (!vma_m) {
70376 + error = -ENOMEM;
70377 + goto fail;
70378 + }
70379 + vma->vm_flags = newflags;
70380 + error = pax_mirror_vma(vma_m, vma);
70381 + if (error) {
70382 + vma->vm_flags = oldflags;
70383 + goto fail;
70384 + }
70385 + }
70386 + }
70387 +#endif
70388 +
70389 /*
70390 * First try to merge with previous and/or next vma.
70391 */
70392 @@ -204,9 +306,21 @@ success:
70393 * vm_flags and vm_page_prot are protected by the mmap_sem
70394 * held in write mode.
70395 */
70396 +
70397 +#ifdef CONFIG_PAX_SEGMEXEC
70398 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70399 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70400 +#endif
70401 +
70402 vma->vm_flags = newflags;
70403 +
70404 +#ifdef CONFIG_PAX_MPROTECT
70405 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70406 + mm->binfmt->handle_mprotect(vma, newflags);
70407 +#endif
70408 +
70409 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70410 - vm_get_page_prot(newflags));
70411 + vm_get_page_prot(vma->vm_flags));
70412
70413 if (vma_wants_writenotify(vma)) {
70414 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70415 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70416 end = start + len;
70417 if (end <= start)
70418 return -ENOMEM;
70419 +
70420 +#ifdef CONFIG_PAX_SEGMEXEC
70421 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70422 + if (end > SEGMEXEC_TASK_SIZE)
70423 + return -EINVAL;
70424 + } else
70425 +#endif
70426 +
70427 + if (end > TASK_SIZE)
70428 + return -EINVAL;
70429 +
70430 if (!arch_validate_prot(prot))
70431 return -EINVAL;
70432
70433 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70434 /*
70435 * Does the application expect PROT_READ to imply PROT_EXEC:
70436 */
70437 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70438 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70439 prot |= PROT_EXEC;
70440
70441 vm_flags = calc_vm_prot_bits(prot);
70442 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70443 if (start > vma->vm_start)
70444 prev = vma;
70445
70446 +#ifdef CONFIG_PAX_MPROTECT
70447 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70448 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70449 +#endif
70450 +
70451 for (nstart = start ; ; ) {
70452 unsigned long newflags;
70453
70454 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70455
70456 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70457 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70458 + if (prot & (PROT_WRITE | PROT_EXEC))
70459 + gr_log_rwxmprotect(vma->vm_file);
70460 +
70461 + error = -EACCES;
70462 + goto out;
70463 + }
70464 +
70465 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70466 error = -EACCES;
70467 goto out;
70468 }
70469 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70470 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70471 if (error)
70472 goto out;
70473 +
70474 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70475 +
70476 nstart = tmp;
70477
70478 if (nstart < prev->vm_end)
70479 diff --git a/mm/mremap.c b/mm/mremap.c
70480 index d6959cb..18a402a 100644
70481 --- a/mm/mremap.c
70482 +++ b/mm/mremap.c
70483 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70484 continue;
70485 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70486 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70487 +
70488 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70489 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70490 + pte = pte_exprotect(pte);
70491 +#endif
70492 +
70493 set_pte_at(mm, new_addr, new_pte, pte);
70494 }
70495
70496 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70497 if (is_vm_hugetlb_page(vma))
70498 goto Einval;
70499
70500 +#ifdef CONFIG_PAX_SEGMEXEC
70501 + if (pax_find_mirror_vma(vma))
70502 + goto Einval;
70503 +#endif
70504 +
70505 /* We can't remap across vm area boundaries */
70506 if (old_len > vma->vm_end - addr)
70507 goto Efault;
70508 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70509 unsigned long ret = -EINVAL;
70510 unsigned long charged = 0;
70511 unsigned long map_flags;
70512 + unsigned long pax_task_size = TASK_SIZE;
70513
70514 if (new_addr & ~PAGE_MASK)
70515 goto out;
70516
70517 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70518 +#ifdef CONFIG_PAX_SEGMEXEC
70519 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70520 + pax_task_size = SEGMEXEC_TASK_SIZE;
70521 +#endif
70522 +
70523 + pax_task_size -= PAGE_SIZE;
70524 +
70525 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70526 goto out;
70527
70528 /* Check if the location we're moving into overlaps the
70529 * old location at all, and fail if it does.
70530 */
70531 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70532 - goto out;
70533 -
70534 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70535 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70536 goto out;
70537
70538 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70539 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70540 struct vm_area_struct *vma;
70541 unsigned long ret = -EINVAL;
70542 unsigned long charged = 0;
70543 + unsigned long pax_task_size = TASK_SIZE;
70544
70545 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70546 goto out;
70547 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70548 if (!new_len)
70549 goto out;
70550
70551 +#ifdef CONFIG_PAX_SEGMEXEC
70552 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70553 + pax_task_size = SEGMEXEC_TASK_SIZE;
70554 +#endif
70555 +
70556 + pax_task_size -= PAGE_SIZE;
70557 +
70558 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70559 + old_len > pax_task_size || addr > pax_task_size-old_len)
70560 + goto out;
70561 +
70562 if (flags & MREMAP_FIXED) {
70563 if (flags & MREMAP_MAYMOVE)
70564 ret = mremap_to(addr, old_len, new_addr, new_len);
70565 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70566 addr + new_len);
70567 }
70568 ret = addr;
70569 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70570 goto out;
70571 }
70572 }
70573 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70574 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70575 if (ret)
70576 goto out;
70577 +
70578 + map_flags = vma->vm_flags;
70579 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70580 + if (!(ret & ~PAGE_MASK)) {
70581 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70582 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70583 + }
70584 }
70585 out:
70586 if (ret & ~PAGE_MASK)
70587 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70588 index 7fa41b4..6087460 100644
70589 --- a/mm/nobootmem.c
70590 +++ b/mm/nobootmem.c
70591 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70592 unsigned long __init free_all_memory_core_early(int nodeid)
70593 {
70594 int i;
70595 - u64 start, end;
70596 + u64 start, end, startrange, endrange;
70597 unsigned long count = 0;
70598 - struct range *range = NULL;
70599 + struct range *range = NULL, rangerange = { 0, 0 };
70600 int nr_range;
70601
70602 nr_range = get_free_all_memory_range(&range, nodeid);
70603 + startrange = __pa(range) >> PAGE_SHIFT;
70604 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70605
70606 for (i = 0; i < nr_range; i++) {
70607 start = range[i].start;
70608 end = range[i].end;
70609 + if (start <= endrange && startrange < end) {
70610 + BUG_ON(rangerange.start | rangerange.end);
70611 + rangerange = range[i];
70612 + continue;
70613 + }
70614 count += end - start;
70615 __free_pages_memory(start, end);
70616 }
70617 + start = rangerange.start;
70618 + end = rangerange.end;
70619 + count += end - start;
70620 + __free_pages_memory(start, end);
70621
70622 return count;
70623 }
70624 diff --git a/mm/nommu.c b/mm/nommu.c
70625 index b982290..7d73f53 100644
70626 --- a/mm/nommu.c
70627 +++ b/mm/nommu.c
70628 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70629 int sysctl_overcommit_ratio = 50; /* default is 50% */
70630 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70631 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70632 -int heap_stack_gap = 0;
70633
70634 atomic_long_t mmap_pages_allocated;
70635
70636 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70637 EXPORT_SYMBOL(find_vma);
70638
70639 /*
70640 - * find a VMA
70641 - * - we don't extend stack VMAs under NOMMU conditions
70642 - */
70643 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70644 -{
70645 - return find_vma(mm, addr);
70646 -}
70647 -
70648 -/*
70649 * expand a stack to a given address
70650 * - not supported under NOMMU conditions
70651 */
70652 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70653
70654 /* most fields are the same, copy all, and then fixup */
70655 *new = *vma;
70656 + INIT_LIST_HEAD(&new->anon_vma_chain);
70657 *region = *vma->vm_region;
70658 new->vm_region = region;
70659
70660 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70661 index 485be89..c059ad3 100644
70662 --- a/mm/page_alloc.c
70663 +++ b/mm/page_alloc.c
70664 @@ -341,7 +341,7 @@ out:
70665 * This usage means that zero-order pages may not be compound.
70666 */
70667
70668 -static void free_compound_page(struct page *page)
70669 +void free_compound_page(struct page *page)
70670 {
70671 __free_pages_ok(page, compound_order(page));
70672 }
70673 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70674 int i;
70675 int bad = 0;
70676
70677 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70678 + unsigned long index = 1UL << order;
70679 +#endif
70680 +
70681 trace_mm_page_free_direct(page, order);
70682 kmemcheck_free_shadow(page, order);
70683
70684 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70685 debug_check_no_obj_freed(page_address(page),
70686 PAGE_SIZE << order);
70687 }
70688 +
70689 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70690 + for (; index; --index)
70691 + sanitize_highpage(page + index - 1);
70692 +#endif
70693 +
70694 arch_free_page(page, order);
70695 kernel_map_pages(page, 1 << order, 0);
70696
70697 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70698 arch_alloc_page(page, order);
70699 kernel_map_pages(page, 1 << order, 1);
70700
70701 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70702 if (gfp_flags & __GFP_ZERO)
70703 prep_zero_page(page, order, gfp_flags);
70704 +#endif
70705
70706 if (order && (gfp_flags & __GFP_COMP))
70707 prep_compound_page(page, order);
70708 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70709 unsigned long pfn;
70710
70711 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70712 +#ifdef CONFIG_X86_32
70713 + /* boot failures in VMware 8 on 32bit vanilla since
70714 + this change */
70715 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70716 +#else
70717 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70718 +#endif
70719 return 1;
70720 }
70721 return 0;
70722 diff --git a/mm/percpu.c b/mm/percpu.c
70723 index 716eb4a..8d10419 100644
70724 --- a/mm/percpu.c
70725 +++ b/mm/percpu.c
70726 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70727 static unsigned int pcpu_high_unit_cpu __read_mostly;
70728
70729 /* the address of the first chunk which starts with the kernel static area */
70730 -void *pcpu_base_addr __read_mostly;
70731 +void *pcpu_base_addr __read_only;
70732 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70733
70734 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70735 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70736 index e920aa3..137702a 100644
70737 --- a/mm/process_vm_access.c
70738 +++ b/mm/process_vm_access.c
70739 @@ -13,6 +13,7 @@
70740 #include <linux/uio.h>
70741 #include <linux/sched.h>
70742 #include <linux/highmem.h>
70743 +#include <linux/security.h>
70744 #include <linux/ptrace.h>
70745 #include <linux/slab.h>
70746 #include <linux/syscalls.h>
70747 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70748 size_t iov_l_curr_offset = 0;
70749 ssize_t iov_len;
70750
70751 + return -ENOSYS; // PaX: until properly audited
70752 +
70753 /*
70754 * Work out how many pages of struct pages we're going to need
70755 * when eventually calling get_user_pages
70756 */
70757 for (i = 0; i < riovcnt; i++) {
70758 iov_len = rvec[i].iov_len;
70759 - if (iov_len > 0) {
70760 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70761 - + iov_len)
70762 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70763 - / PAGE_SIZE + 1;
70764 - nr_pages = max(nr_pages, nr_pages_iov);
70765 - }
70766 + if (iov_len <= 0)
70767 + continue;
70768 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70769 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70770 + nr_pages = max(nr_pages, nr_pages_iov);
70771 }
70772
70773 if (nr_pages == 0)
70774 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70775 goto free_proc_pages;
70776 }
70777
70778 - task_lock(task);
70779 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70780 - task_unlock(task);
70781 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70782 rc = -EPERM;
70783 goto put_task_struct;
70784 }
70785 - mm = task->mm;
70786
70787 - if (!mm || (task->flags & PF_KTHREAD)) {
70788 - task_unlock(task);
70789 - rc = -EINVAL;
70790 + mm = mm_access(task, PTRACE_MODE_ATTACH);
70791 + if (!mm || IS_ERR(mm)) {
70792 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70793 + /*
70794 + * Explicitly map EACCES to EPERM as EPERM is a more a
70795 + * appropriate error code for process_vw_readv/writev
70796 + */
70797 + if (rc == -EACCES)
70798 + rc = -EPERM;
70799 goto put_task_struct;
70800 }
70801
70802 - atomic_inc(&mm->mm_users);
70803 - task_unlock(task);
70804 -
70805 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70806 rc = process_vm_rw_single_vec(
70807 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70808 diff --git a/mm/rmap.c b/mm/rmap.c
70809 index a4fd368..e0ffec7 100644
70810 --- a/mm/rmap.c
70811 +++ b/mm/rmap.c
70812 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70813 struct anon_vma *anon_vma = vma->anon_vma;
70814 struct anon_vma_chain *avc;
70815
70816 +#ifdef CONFIG_PAX_SEGMEXEC
70817 + struct anon_vma_chain *avc_m = NULL;
70818 +#endif
70819 +
70820 might_sleep();
70821 if (unlikely(!anon_vma)) {
70822 struct mm_struct *mm = vma->vm_mm;
70823 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70824 if (!avc)
70825 goto out_enomem;
70826
70827 +#ifdef CONFIG_PAX_SEGMEXEC
70828 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70829 + if (!avc_m)
70830 + goto out_enomem_free_avc;
70831 +#endif
70832 +
70833 anon_vma = find_mergeable_anon_vma(vma);
70834 allocated = NULL;
70835 if (!anon_vma) {
70836 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70837 /* page_table_lock to protect against threads */
70838 spin_lock(&mm->page_table_lock);
70839 if (likely(!vma->anon_vma)) {
70840 +
70841 +#ifdef CONFIG_PAX_SEGMEXEC
70842 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70843 +
70844 + if (vma_m) {
70845 + BUG_ON(vma_m->anon_vma);
70846 + vma_m->anon_vma = anon_vma;
70847 + avc_m->anon_vma = anon_vma;
70848 + avc_m->vma = vma;
70849 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70850 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70851 + avc_m = NULL;
70852 + }
70853 +#endif
70854 +
70855 vma->anon_vma = anon_vma;
70856 avc->anon_vma = anon_vma;
70857 avc->vma = vma;
70858 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70859
70860 if (unlikely(allocated))
70861 put_anon_vma(allocated);
70862 +
70863 +#ifdef CONFIG_PAX_SEGMEXEC
70864 + if (unlikely(avc_m))
70865 + anon_vma_chain_free(avc_m);
70866 +#endif
70867 +
70868 if (unlikely(avc))
70869 anon_vma_chain_free(avc);
70870 }
70871 return 0;
70872
70873 out_enomem_free_avc:
70874 +
70875 +#ifdef CONFIG_PAX_SEGMEXEC
70876 + if (avc_m)
70877 + anon_vma_chain_free(avc_m);
70878 +#endif
70879 +
70880 anon_vma_chain_free(avc);
70881 out_enomem:
70882 return -ENOMEM;
70883 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70884 * Attach the anon_vmas from src to dst.
70885 * Returns 0 on success, -ENOMEM on failure.
70886 */
70887 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70888 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70889 {
70890 struct anon_vma_chain *avc, *pavc;
70891 struct anon_vma *root = NULL;
70892 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70893 * the corresponding VMA in the parent process is attached to.
70894 * Returns 0 on success, non-zero on failure.
70895 */
70896 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70897 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70898 {
70899 struct anon_vma_chain *avc;
70900 struct anon_vma *anon_vma;
70901 diff --git a/mm/shmem.c b/mm/shmem.c
70902 index 6c253f7..367e20a 100644
70903 --- a/mm/shmem.c
70904 +++ b/mm/shmem.c
70905 @@ -31,7 +31,7 @@
70906 #include <linux/export.h>
70907 #include <linux/swap.h>
70908
70909 -static struct vfsmount *shm_mnt;
70910 +struct vfsmount *shm_mnt;
70911
70912 #ifdef CONFIG_SHMEM
70913 /*
70914 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70915 #define BOGO_DIRENT_SIZE 20
70916
70917 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70918 -#define SHORT_SYMLINK_LEN 128
70919 +#define SHORT_SYMLINK_LEN 64
70920
70921 struct shmem_xattr {
70922 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70923 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70924 int err = -ENOMEM;
70925
70926 /* Round up to L1_CACHE_BYTES to resist false sharing */
70927 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70928 - L1_CACHE_BYTES), GFP_KERNEL);
70929 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70930 if (!sbinfo)
70931 return -ENOMEM;
70932
70933 diff --git a/mm/slab.c b/mm/slab.c
70934 index 83311c9a..fcf8f86 100644
70935 --- a/mm/slab.c
70936 +++ b/mm/slab.c
70937 @@ -151,7 +151,7 @@
70938
70939 /* Legal flag mask for kmem_cache_create(). */
70940 #if DEBUG
70941 -# define CREATE_MASK (SLAB_RED_ZONE | \
70942 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70943 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70944 SLAB_CACHE_DMA | \
70945 SLAB_STORE_USER | \
70946 @@ -159,7 +159,7 @@
70947 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70948 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70949 #else
70950 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70951 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70952 SLAB_CACHE_DMA | \
70953 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70954 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70955 @@ -288,7 +288,7 @@ struct kmem_list3 {
70956 * Need this for bootstrapping a per node allocator.
70957 */
70958 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70959 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70960 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70961 #define CACHE_CACHE 0
70962 #define SIZE_AC MAX_NUMNODES
70963 #define SIZE_L3 (2 * MAX_NUMNODES)
70964 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70965 if ((x)->max_freeable < i) \
70966 (x)->max_freeable = i; \
70967 } while (0)
70968 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70969 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70970 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70971 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70972 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70973 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70974 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70975 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70976 #else
70977 #define STATS_INC_ACTIVE(x) do { } while (0)
70978 #define STATS_DEC_ACTIVE(x) do { } while (0)
70979 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70980 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70981 */
70982 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70983 - const struct slab *slab, void *obj)
70984 + const struct slab *slab, const void *obj)
70985 {
70986 u32 offset = (obj - slab->s_mem);
70987 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70988 @@ -564,7 +564,7 @@ struct cache_names {
70989 static struct cache_names __initdata cache_names[] = {
70990 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70991 #include <linux/kmalloc_sizes.h>
70992 - {NULL,}
70993 + {NULL}
70994 #undef CACHE
70995 };
70996
70997 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70998 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70999 sizes[INDEX_AC].cs_size,
71000 ARCH_KMALLOC_MINALIGN,
71001 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71002 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71003 NULL);
71004
71005 if (INDEX_AC != INDEX_L3) {
71006 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
71007 kmem_cache_create(names[INDEX_L3].name,
71008 sizes[INDEX_L3].cs_size,
71009 ARCH_KMALLOC_MINALIGN,
71010 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71011 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71012 NULL);
71013 }
71014
71015 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
71016 sizes->cs_cachep = kmem_cache_create(names->name,
71017 sizes->cs_size,
71018 ARCH_KMALLOC_MINALIGN,
71019 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71020 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71021 NULL);
71022 }
71023 #ifdef CONFIG_ZONE_DMA
71024 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
71025 }
71026 /* cpu stats */
71027 {
71028 - unsigned long allochit = atomic_read(&cachep->allochit);
71029 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
71030 - unsigned long freehit = atomic_read(&cachep->freehit);
71031 - unsigned long freemiss = atomic_read(&cachep->freemiss);
71032 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
71033 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
71034 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
71035 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
71036
71037 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
71038 allochit, allocmiss, freehit, freemiss);
71039 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
71040 {
71041 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
71042 #ifdef CONFIG_DEBUG_SLAB_LEAK
71043 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
71044 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
71045 #endif
71046 return 0;
71047 }
71048 module_init(slab_proc_init);
71049 #endif
71050
71051 +void check_object_size(const void *ptr, unsigned long n, bool to)
71052 +{
71053 +
71054 +#ifdef CONFIG_PAX_USERCOPY
71055 + struct page *page;
71056 + struct kmem_cache *cachep = NULL;
71057 + struct slab *slabp;
71058 + unsigned int objnr;
71059 + unsigned long offset;
71060 + const char *type;
71061 +
71062 + if (!n)
71063 + return;
71064 +
71065 + type = "<null>";
71066 + if (ZERO_OR_NULL_PTR(ptr))
71067 + goto report;
71068 +
71069 + if (!virt_addr_valid(ptr))
71070 + return;
71071 +
71072 + page = virt_to_head_page(ptr);
71073 +
71074 + type = "<process stack>";
71075 + if (!PageSlab(page)) {
71076 + if (object_is_on_stack(ptr, n) == -1)
71077 + goto report;
71078 + return;
71079 + }
71080 +
71081 + cachep = page_get_cache(page);
71082 + type = cachep->name;
71083 + if (!(cachep->flags & SLAB_USERCOPY))
71084 + goto report;
71085 +
71086 + slabp = page_get_slab(page);
71087 + objnr = obj_to_index(cachep, slabp, ptr);
71088 + BUG_ON(objnr >= cachep->num);
71089 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
71090 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
71091 + return;
71092 +
71093 +report:
71094 + pax_report_usercopy(ptr, n, to, type);
71095 +#endif
71096 +
71097 +}
71098 +EXPORT_SYMBOL(check_object_size);
71099 +
71100 /**
71101 * ksize - get the actual amount of memory allocated for a given object
71102 * @objp: Pointer to the object
71103 diff --git a/mm/slob.c b/mm/slob.c
71104 index 8105be4..e045f96 100644
71105 --- a/mm/slob.c
71106 +++ b/mm/slob.c
71107 @@ -29,7 +29,7 @@
71108 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
71109 * alloc_pages() directly, allocating compound pages so the page order
71110 * does not have to be separately tracked, and also stores the exact
71111 - * allocation size in page->private so that it can be used to accurately
71112 + * allocation size in slob_page->size so that it can be used to accurately
71113 * provide ksize(). These objects are detected in kfree() because slob_page()
71114 * is false for them.
71115 *
71116 @@ -58,6 +58,7 @@
71117 */
71118
71119 #include <linux/kernel.h>
71120 +#include <linux/sched.h>
71121 #include <linux/slab.h>
71122 #include <linux/mm.h>
71123 #include <linux/swap.h> /* struct reclaim_state */
71124 @@ -102,7 +103,8 @@ struct slob_page {
71125 unsigned long flags; /* mandatory */
71126 atomic_t _count; /* mandatory */
71127 slobidx_t units; /* free units left in page */
71128 - unsigned long pad[2];
71129 + unsigned long pad[1];
71130 + unsigned long size; /* size when >=PAGE_SIZE */
71131 slob_t *free; /* first free slob_t in page */
71132 struct list_head list; /* linked list of free pages */
71133 };
71134 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
71135 */
71136 static inline int is_slob_page(struct slob_page *sp)
71137 {
71138 - return PageSlab((struct page *)sp);
71139 + return PageSlab((struct page *)sp) && !sp->size;
71140 }
71141
71142 static inline void set_slob_page(struct slob_page *sp)
71143 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
71144
71145 static inline struct slob_page *slob_page(const void *addr)
71146 {
71147 - return (struct slob_page *)virt_to_page(addr);
71148 + return (struct slob_page *)virt_to_head_page(addr);
71149 }
71150
71151 /*
71152 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
71153 /*
71154 * Return the size of a slob block.
71155 */
71156 -static slobidx_t slob_units(slob_t *s)
71157 +static slobidx_t slob_units(const slob_t *s)
71158 {
71159 if (s->units > 0)
71160 return s->units;
71161 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
71162 /*
71163 * Return the next free slob block pointer after this one.
71164 */
71165 -static slob_t *slob_next(slob_t *s)
71166 +static slob_t *slob_next(const slob_t *s)
71167 {
71168 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
71169 slobidx_t next;
71170 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
71171 /*
71172 * Returns true if s is the last free block in its page.
71173 */
71174 -static int slob_last(slob_t *s)
71175 +static int slob_last(const slob_t *s)
71176 {
71177 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
71178 }
71179 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
71180 if (!page)
71181 return NULL;
71182
71183 + set_slob_page(page);
71184 return page_address(page);
71185 }
71186
71187 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
71188 if (!b)
71189 return NULL;
71190 sp = slob_page(b);
71191 - set_slob_page(sp);
71192
71193 spin_lock_irqsave(&slob_lock, flags);
71194 sp->units = SLOB_UNITS(PAGE_SIZE);
71195 sp->free = b;
71196 + sp->size = 0;
71197 INIT_LIST_HEAD(&sp->list);
71198 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
71199 set_slob_page_free(sp, slob_list);
71200 @@ -476,10 +479,9 @@ out:
71201 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
71202 */
71203
71204 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71205 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
71206 {
71207 - unsigned int *m;
71208 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71209 + slob_t *m;
71210 void *ret;
71211
71212 gfp &= gfp_allowed_mask;
71213 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71214
71215 if (!m)
71216 return NULL;
71217 - *m = size;
71218 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
71219 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
71220 + m[0].units = size;
71221 + m[1].units = align;
71222 ret = (void *)m + align;
71223
71224 trace_kmalloc_node(_RET_IP_, ret,
71225 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71226 gfp |= __GFP_COMP;
71227 ret = slob_new_pages(gfp, order, node);
71228 if (ret) {
71229 - struct page *page;
71230 - page = virt_to_page(ret);
71231 - page->private = size;
71232 + struct slob_page *sp;
71233 + sp = slob_page(ret);
71234 + sp->size = size;
71235 }
71236
71237 trace_kmalloc_node(_RET_IP_, ret,
71238 size, PAGE_SIZE << order, gfp, node);
71239 }
71240
71241 - kmemleak_alloc(ret, size, 1, gfp);
71242 + return ret;
71243 +}
71244 +
71245 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71246 +{
71247 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71248 + void *ret = __kmalloc_node_align(size, gfp, node, align);
71249 +
71250 + if (!ZERO_OR_NULL_PTR(ret))
71251 + kmemleak_alloc(ret, size, 1, gfp);
71252 return ret;
71253 }
71254 EXPORT_SYMBOL(__kmalloc_node);
71255 @@ -533,13 +547,92 @@ void kfree(const void *block)
71256 sp = slob_page(block);
71257 if (is_slob_page(sp)) {
71258 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71259 - unsigned int *m = (unsigned int *)(block - align);
71260 - slob_free(m, *m + align);
71261 - } else
71262 + slob_t *m = (slob_t *)(block - align);
71263 + slob_free(m, m[0].units + align);
71264 + } else {
71265 + clear_slob_page(sp);
71266 + free_slob_page(sp);
71267 + sp->size = 0;
71268 put_page(&sp->page);
71269 + }
71270 }
71271 EXPORT_SYMBOL(kfree);
71272
71273 +void check_object_size(const void *ptr, unsigned long n, bool to)
71274 +{
71275 +
71276 +#ifdef CONFIG_PAX_USERCOPY
71277 + struct slob_page *sp;
71278 + const slob_t *free;
71279 + const void *base;
71280 + unsigned long flags;
71281 + const char *type;
71282 +
71283 + if (!n)
71284 + return;
71285 +
71286 + type = "<null>";
71287 + if (ZERO_OR_NULL_PTR(ptr))
71288 + goto report;
71289 +
71290 + if (!virt_addr_valid(ptr))
71291 + return;
71292 +
71293 + type = "<process stack>";
71294 + sp = slob_page(ptr);
71295 + if (!PageSlab((struct page *)sp)) {
71296 + if (object_is_on_stack(ptr, n) == -1)
71297 + goto report;
71298 + return;
71299 + }
71300 +
71301 + type = "<slob>";
71302 + if (sp->size) {
71303 + base = page_address(&sp->page);
71304 + if (base <= ptr && n <= sp->size - (ptr - base))
71305 + return;
71306 + goto report;
71307 + }
71308 +
71309 + /* some tricky double walking to find the chunk */
71310 + spin_lock_irqsave(&slob_lock, flags);
71311 + base = (void *)((unsigned long)ptr & PAGE_MASK);
71312 + free = sp->free;
71313 +
71314 + while (!slob_last(free) && (void *)free <= ptr) {
71315 + base = free + slob_units(free);
71316 + free = slob_next(free);
71317 + }
71318 +
71319 + while (base < (void *)free) {
71320 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71321 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
71322 + int offset;
71323 +
71324 + if (ptr < base + align)
71325 + break;
71326 +
71327 + offset = ptr - base - align;
71328 + if (offset >= m) {
71329 + base += size;
71330 + continue;
71331 + }
71332 +
71333 + if (n > m - offset)
71334 + break;
71335 +
71336 + spin_unlock_irqrestore(&slob_lock, flags);
71337 + return;
71338 + }
71339 +
71340 + spin_unlock_irqrestore(&slob_lock, flags);
71341 +report:
71342 + pax_report_usercopy(ptr, n, to, type);
71343 +#endif
71344 +
71345 +}
71346 +EXPORT_SYMBOL(check_object_size);
71347 +
71348 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71349 size_t ksize(const void *block)
71350 {
71351 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
71352 sp = slob_page(block);
71353 if (is_slob_page(sp)) {
71354 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71355 - unsigned int *m = (unsigned int *)(block - align);
71356 - return SLOB_UNITS(*m) * SLOB_UNIT;
71357 + slob_t *m = (slob_t *)(block - align);
71358 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71359 } else
71360 - return sp->page.private;
71361 + return sp->size;
71362 }
71363 EXPORT_SYMBOL(ksize);
71364
71365 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71366 {
71367 struct kmem_cache *c;
71368
71369 +#ifdef CONFIG_PAX_USERCOPY
71370 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
71371 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71372 +#else
71373 c = slob_alloc(sizeof(struct kmem_cache),
71374 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71375 +#endif
71376
71377 if (c) {
71378 c->name = name;
71379 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71380
71381 lockdep_trace_alloc(flags);
71382
71383 +#ifdef CONFIG_PAX_USERCOPY
71384 + b = __kmalloc_node_align(c->size, flags, node, c->align);
71385 +#else
71386 if (c->size < PAGE_SIZE) {
71387 b = slob_alloc(c->size, flags, c->align, node);
71388 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71389 SLOB_UNITS(c->size) * SLOB_UNIT,
71390 flags, node);
71391 } else {
71392 + struct slob_page *sp;
71393 +
71394 b = slob_new_pages(flags, get_order(c->size), node);
71395 + sp = slob_page(b);
71396 + sp->size = c->size;
71397 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71398 PAGE_SIZE << get_order(c->size),
71399 flags, node);
71400 }
71401 +#endif
71402
71403 if (c->ctor)
71404 c->ctor(b);
71405 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71406
71407 static void __kmem_cache_free(void *b, int size)
71408 {
71409 - if (size < PAGE_SIZE)
71410 + struct slob_page *sp = slob_page(b);
71411 +
71412 + if (is_slob_page(sp))
71413 slob_free(b, size);
71414 - else
71415 + else {
71416 + clear_slob_page(sp);
71417 + free_slob_page(sp);
71418 + sp->size = 0;
71419 slob_free_pages(b, get_order(size));
71420 + }
71421 }
71422
71423 static void kmem_rcu_free(struct rcu_head *head)
71424 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71425
71426 void kmem_cache_free(struct kmem_cache *c, void *b)
71427 {
71428 + int size = c->size;
71429 +
71430 +#ifdef CONFIG_PAX_USERCOPY
71431 + if (size + c->align < PAGE_SIZE) {
71432 + size += c->align;
71433 + b -= c->align;
71434 + }
71435 +#endif
71436 +
71437 kmemleak_free_recursive(b, c->flags);
71438 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71439 struct slob_rcu *slob_rcu;
71440 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71441 - slob_rcu->size = c->size;
71442 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71443 + slob_rcu->size = size;
71444 call_rcu(&slob_rcu->head, kmem_rcu_free);
71445 } else {
71446 - __kmem_cache_free(b, c->size);
71447 + __kmem_cache_free(b, size);
71448 }
71449
71450 +#ifdef CONFIG_PAX_USERCOPY
71451 + trace_kfree(_RET_IP_, b);
71452 +#else
71453 trace_kmem_cache_free(_RET_IP_, b);
71454 +#endif
71455 +
71456 }
71457 EXPORT_SYMBOL(kmem_cache_free);
71458
71459 diff --git a/mm/slub.c b/mm/slub.c
71460 index 1a919f0..1739c9b 100644
71461 --- a/mm/slub.c
71462 +++ b/mm/slub.c
71463 @@ -208,7 +208,7 @@ struct track {
71464
71465 enum track_item { TRACK_ALLOC, TRACK_FREE };
71466
71467 -#ifdef CONFIG_SYSFS
71468 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71469 static int sysfs_slab_add(struct kmem_cache *);
71470 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71471 static void sysfs_slab_remove(struct kmem_cache *);
71472 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71473 if (!t->addr)
71474 return;
71475
71476 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71477 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71478 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71479 #ifdef CONFIG_STACKTRACE
71480 {
71481 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71482
71483 page = virt_to_head_page(x);
71484
71485 + BUG_ON(!PageSlab(page));
71486 +
71487 slab_free(s, page, x, _RET_IP_);
71488
71489 trace_kmem_cache_free(_RET_IP_, x);
71490 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71491 * Merge control. If this is set then no merging of slab caches will occur.
71492 * (Could be removed. This was introduced to pacify the merge skeptics.)
71493 */
71494 -static int slub_nomerge;
71495 +static int slub_nomerge = 1;
71496
71497 /*
71498 * Calculate the order of allocation given an slab object size.
71499 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71500 else
71501 s->cpu_partial = 30;
71502
71503 - s->refcount = 1;
71504 + atomic_set(&s->refcount, 1);
71505 #ifdef CONFIG_NUMA
71506 s->remote_node_defrag_ratio = 1000;
71507 #endif
71508 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71509 void kmem_cache_destroy(struct kmem_cache *s)
71510 {
71511 down_write(&slub_lock);
71512 - s->refcount--;
71513 - if (!s->refcount) {
71514 + if (atomic_dec_and_test(&s->refcount)) {
71515 list_del(&s->list);
71516 up_write(&slub_lock);
71517 if (kmem_cache_close(s)) {
71518 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71519 EXPORT_SYMBOL(__kmalloc_node);
71520 #endif
71521
71522 +void check_object_size(const void *ptr, unsigned long n, bool to)
71523 +{
71524 +
71525 +#ifdef CONFIG_PAX_USERCOPY
71526 + struct page *page;
71527 + struct kmem_cache *s = NULL;
71528 + unsigned long offset;
71529 + const char *type;
71530 +
71531 + if (!n)
71532 + return;
71533 +
71534 + type = "<null>";
71535 + if (ZERO_OR_NULL_PTR(ptr))
71536 + goto report;
71537 +
71538 + if (!virt_addr_valid(ptr))
71539 + return;
71540 +
71541 + page = virt_to_head_page(ptr);
71542 +
71543 + type = "<process stack>";
71544 + if (!PageSlab(page)) {
71545 + if (object_is_on_stack(ptr, n) == -1)
71546 + goto report;
71547 + return;
71548 + }
71549 +
71550 + s = page->slab;
71551 + type = s->name;
71552 + if (!(s->flags & SLAB_USERCOPY))
71553 + goto report;
71554 +
71555 + offset = (ptr - page_address(page)) % s->size;
71556 + if (offset <= s->objsize && n <= s->objsize - offset)
71557 + return;
71558 +
71559 +report:
71560 + pax_report_usercopy(ptr, n, to, type);
71561 +#endif
71562 +
71563 +}
71564 +EXPORT_SYMBOL(check_object_size);
71565 +
71566 size_t ksize(const void *object)
71567 {
71568 struct page *page;
71569 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71570 int node;
71571
71572 list_add(&s->list, &slab_caches);
71573 - s->refcount = -1;
71574 + atomic_set(&s->refcount, -1);
71575
71576 for_each_node_state(node, N_NORMAL_MEMORY) {
71577 struct kmem_cache_node *n = get_node(s, node);
71578 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71579
71580 /* Caches that are not of the two-to-the-power-of size */
71581 if (KMALLOC_MIN_SIZE <= 32) {
71582 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71583 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71584 caches++;
71585 }
71586
71587 if (KMALLOC_MIN_SIZE <= 64) {
71588 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71589 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71590 caches++;
71591 }
71592
71593 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71594 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71595 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71596 caches++;
71597 }
71598
71599 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71600 /*
71601 * We may have set a slab to be unmergeable during bootstrap.
71602 */
71603 - if (s->refcount < 0)
71604 + if (atomic_read(&s->refcount) < 0)
71605 return 1;
71606
71607 return 0;
71608 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71609 down_write(&slub_lock);
71610 s = find_mergeable(size, align, flags, name, ctor);
71611 if (s) {
71612 - s->refcount++;
71613 + atomic_inc(&s->refcount);
71614 /*
71615 * Adjust the object sizes so that we clear
71616 * the complete object on kzalloc.
71617 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71618 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71619
71620 if (sysfs_slab_alias(s, name)) {
71621 - s->refcount--;
71622 + atomic_dec(&s->refcount);
71623 goto err;
71624 }
71625 up_write(&slub_lock);
71626 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71627 }
71628 #endif
71629
71630 -#ifdef CONFIG_SYSFS
71631 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71632 static int count_inuse(struct page *page)
71633 {
71634 return page->inuse;
71635 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71636 validate_slab_cache(kmalloc_caches[9]);
71637 }
71638 #else
71639 -#ifdef CONFIG_SYSFS
71640 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71641 static void resiliency_test(void) {};
71642 #endif
71643 #endif
71644
71645 -#ifdef CONFIG_SYSFS
71646 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71647 enum slab_stat_type {
71648 SL_ALL, /* All slabs */
71649 SL_PARTIAL, /* Only partially allocated slabs */
71650 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71651
71652 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71653 {
71654 - return sprintf(buf, "%d\n", s->refcount - 1);
71655 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71656 }
71657 SLAB_ATTR_RO(aliases);
71658
71659 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71660 return name;
71661 }
71662
71663 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71664 static int sysfs_slab_add(struct kmem_cache *s)
71665 {
71666 int err;
71667 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71668 kobject_del(&s->kobj);
71669 kobject_put(&s->kobj);
71670 }
71671 +#endif
71672
71673 /*
71674 * Need to buffer aliases during bootup until sysfs becomes
71675 @@ -5298,6 +5345,7 @@ struct saved_alias {
71676
71677 static struct saved_alias *alias_list;
71678
71679 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71680 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71681 {
71682 struct saved_alias *al;
71683 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71684 alias_list = al;
71685 return 0;
71686 }
71687 +#endif
71688
71689 static int __init slab_sysfs_init(void)
71690 {
71691 diff --git a/mm/swap.c b/mm/swap.c
71692 index 55b266d..a532537 100644
71693 --- a/mm/swap.c
71694 +++ b/mm/swap.c
71695 @@ -31,6 +31,7 @@
71696 #include <linux/backing-dev.h>
71697 #include <linux/memcontrol.h>
71698 #include <linux/gfp.h>
71699 +#include <linux/hugetlb.h>
71700
71701 #include "internal.h"
71702
71703 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71704
71705 __page_cache_release(page);
71706 dtor = get_compound_page_dtor(page);
71707 + if (!PageHuge(page))
71708 + BUG_ON(dtor != free_compound_page);
71709 (*dtor)(page);
71710 }
71711
71712 diff --git a/mm/swapfile.c b/mm/swapfile.c
71713 index b1cd120..aaae885 100644
71714 --- a/mm/swapfile.c
71715 +++ b/mm/swapfile.c
71716 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71717
71718 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71719 /* Activity counter to indicate that a swapon or swapoff has occurred */
71720 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71721 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71722
71723 static inline unsigned char swap_count(unsigned char ent)
71724 {
71725 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71726 }
71727 filp_close(swap_file, NULL);
71728 err = 0;
71729 - atomic_inc(&proc_poll_event);
71730 + atomic_inc_unchecked(&proc_poll_event);
71731 wake_up_interruptible(&proc_poll_wait);
71732
71733 out_dput:
71734 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71735
71736 poll_wait(file, &proc_poll_wait, wait);
71737
71738 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71739 - seq->poll_event = atomic_read(&proc_poll_event);
71740 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71741 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71742 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71743 }
71744
71745 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71746 return ret;
71747
71748 seq = file->private_data;
71749 - seq->poll_event = atomic_read(&proc_poll_event);
71750 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71751 return 0;
71752 }
71753
71754 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71755 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71756
71757 mutex_unlock(&swapon_mutex);
71758 - atomic_inc(&proc_poll_event);
71759 + atomic_inc_unchecked(&proc_poll_event);
71760 wake_up_interruptible(&proc_poll_wait);
71761
71762 if (S_ISREG(inode->i_mode))
71763 diff --git a/mm/util.c b/mm/util.c
71764 index 136ac4f..5117eef 100644
71765 --- a/mm/util.c
71766 +++ b/mm/util.c
71767 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71768 * allocated buffer. Use this if you don't want to free the buffer immediately
71769 * like, for example, with RCU.
71770 */
71771 +#undef __krealloc
71772 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71773 {
71774 void *ret;
71775 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71776 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71777 * %NULL pointer, the object pointed to is freed.
71778 */
71779 +#undef krealloc
71780 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71781 {
71782 void *ret;
71783 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71784 void arch_pick_mmap_layout(struct mm_struct *mm)
71785 {
71786 mm->mmap_base = TASK_UNMAPPED_BASE;
71787 +
71788 +#ifdef CONFIG_PAX_RANDMMAP
71789 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71790 + mm->mmap_base += mm->delta_mmap;
71791 +#endif
71792 +
71793 mm->get_unmapped_area = arch_get_unmapped_area;
71794 mm->unmap_area = arch_unmap_area;
71795 }
71796 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71797 index 27be2f0..0aef2c2 100644
71798 --- a/mm/vmalloc.c
71799 +++ b/mm/vmalloc.c
71800 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71801
71802 pte = pte_offset_kernel(pmd, addr);
71803 do {
71804 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71805 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71806 +
71807 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71808 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71809 + BUG_ON(!pte_exec(*pte));
71810 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71811 + continue;
71812 + }
71813 +#endif
71814 +
71815 + {
71816 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71817 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71818 + }
71819 } while (pte++, addr += PAGE_SIZE, addr != end);
71820 }
71821
71822 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71823 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71824 {
71825 pte_t *pte;
71826 + int ret = -ENOMEM;
71827
71828 /*
71829 * nr is a running index into the array which helps higher level
71830 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71831 pte = pte_alloc_kernel(pmd, addr);
71832 if (!pte)
71833 return -ENOMEM;
71834 +
71835 + pax_open_kernel();
71836 do {
71837 struct page *page = pages[*nr];
71838
71839 - if (WARN_ON(!pte_none(*pte)))
71840 - return -EBUSY;
71841 - if (WARN_ON(!page))
71842 - return -ENOMEM;
71843 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71844 + if (pgprot_val(prot) & _PAGE_NX)
71845 +#endif
71846 +
71847 + if (WARN_ON(!pte_none(*pte))) {
71848 + ret = -EBUSY;
71849 + goto out;
71850 + }
71851 + if (WARN_ON(!page)) {
71852 + ret = -ENOMEM;
71853 + goto out;
71854 + }
71855 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71856 (*nr)++;
71857 } while (pte++, addr += PAGE_SIZE, addr != end);
71858 - return 0;
71859 + ret = 0;
71860 +out:
71861 + pax_close_kernel();
71862 + return ret;
71863 }
71864
71865 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71866 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71867 * and fall back on vmalloc() if that fails. Others
71868 * just put it in the vmalloc space.
71869 */
71870 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71871 +#ifdef CONFIG_MODULES
71872 +#ifdef MODULES_VADDR
71873 unsigned long addr = (unsigned long)x;
71874 if (addr >= MODULES_VADDR && addr < MODULES_END)
71875 return 1;
71876 #endif
71877 +
71878 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71879 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71880 + return 1;
71881 +#endif
71882 +
71883 +#endif
71884 +
71885 return is_vmalloc_addr(x);
71886 }
71887
71888 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71889
71890 if (!pgd_none(*pgd)) {
71891 pud_t *pud = pud_offset(pgd, addr);
71892 +#ifdef CONFIG_X86
71893 + if (!pud_large(*pud))
71894 +#endif
71895 if (!pud_none(*pud)) {
71896 pmd_t *pmd = pmd_offset(pud, addr);
71897 +#ifdef CONFIG_X86
71898 + if (!pmd_large(*pmd))
71899 +#endif
71900 if (!pmd_none(*pmd)) {
71901 pte_t *ptep, pte;
71902
71903 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71904 struct vm_struct *area;
71905
71906 BUG_ON(in_interrupt());
71907 +
71908 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71909 + if (flags & VM_KERNEXEC) {
71910 + if (start != VMALLOC_START || end != VMALLOC_END)
71911 + return NULL;
71912 + start = (unsigned long)MODULES_EXEC_VADDR;
71913 + end = (unsigned long)MODULES_EXEC_END;
71914 + }
71915 +#endif
71916 +
71917 if (flags & VM_IOREMAP) {
71918 int bit = fls(size);
71919
71920 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71921 if (count > totalram_pages)
71922 return NULL;
71923
71924 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71925 + if (!(pgprot_val(prot) & _PAGE_NX))
71926 + flags |= VM_KERNEXEC;
71927 +#endif
71928 +
71929 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71930 __builtin_return_address(0));
71931 if (!area)
71932 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71933 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71934 goto fail;
71935
71936 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71937 + if (!(pgprot_val(prot) & _PAGE_NX))
71938 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71939 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71940 + else
71941 +#endif
71942 +
71943 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71944 start, end, node, gfp_mask, caller);
71945 if (!area)
71946 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71947 gfp_mask, prot, node, caller);
71948 }
71949
71950 +#undef __vmalloc
71951 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71952 {
71953 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71954 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71955 * For tight control over page level allocator and protection flags
71956 * use __vmalloc() instead.
71957 */
71958 +#undef vmalloc
71959 void *vmalloc(unsigned long size)
71960 {
71961 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71962 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71963 * For tight control over page level allocator and protection flags
71964 * use __vmalloc() instead.
71965 */
71966 +#undef vzalloc
71967 void *vzalloc(unsigned long size)
71968 {
71969 return __vmalloc_node_flags(size, -1,
71970 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71971 * The resulting memory area is zeroed so it can be mapped to userspace
71972 * without leaking data.
71973 */
71974 +#undef vmalloc_user
71975 void *vmalloc_user(unsigned long size)
71976 {
71977 struct vm_struct *area;
71978 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71979 * For tight control over page level allocator and protection flags
71980 * use __vmalloc() instead.
71981 */
71982 +#undef vmalloc_node
71983 void *vmalloc_node(unsigned long size, int node)
71984 {
71985 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71986 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71987 * For tight control over page level allocator and protection flags
71988 * use __vmalloc_node() instead.
71989 */
71990 +#undef vzalloc_node
71991 void *vzalloc_node(unsigned long size, int node)
71992 {
71993 return __vmalloc_node_flags(size, node,
71994 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71995 * For tight control over page level allocator and protection flags
71996 * use __vmalloc() instead.
71997 */
71998 -
71999 +#undef vmalloc_exec
72000 void *vmalloc_exec(unsigned long size)
72001 {
72002 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
72003 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
72004 -1, __builtin_return_address(0));
72005 }
72006
72007 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
72008 * Allocate enough 32bit PA addressable pages to cover @size from the
72009 * page level allocator and map them into contiguous kernel virtual space.
72010 */
72011 +#undef vmalloc_32
72012 void *vmalloc_32(unsigned long size)
72013 {
72014 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
72015 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
72016 * The resulting memory area is 32bit addressable and zeroed so it can be
72017 * mapped to userspace without leaking data.
72018 */
72019 +#undef vmalloc_32_user
72020 void *vmalloc_32_user(unsigned long size)
72021 {
72022 struct vm_struct *area;
72023 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
72024 unsigned long uaddr = vma->vm_start;
72025 unsigned long usize = vma->vm_end - vma->vm_start;
72026
72027 + BUG_ON(vma->vm_mirror);
72028 +
72029 if ((PAGE_SIZE-1) & (unsigned long)addr)
72030 return -EINVAL;
72031
72032 diff --git a/mm/vmstat.c b/mm/vmstat.c
72033 index 8fd603b..cf0d930 100644
72034 --- a/mm/vmstat.c
72035 +++ b/mm/vmstat.c
72036 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
72037 *
72038 * vm_stat contains the global counters
72039 */
72040 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72041 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72042 EXPORT_SYMBOL(vm_stat);
72043
72044 #ifdef CONFIG_SMP
72045 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
72046 v = p->vm_stat_diff[i];
72047 p->vm_stat_diff[i] = 0;
72048 local_irq_restore(flags);
72049 - atomic_long_add(v, &zone->vm_stat[i]);
72050 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
72051 global_diff[i] += v;
72052 #ifdef CONFIG_NUMA
72053 /* 3 seconds idle till flush */
72054 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
72055
72056 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
72057 if (global_diff[i])
72058 - atomic_long_add(global_diff[i], &vm_stat[i]);
72059 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
72060 }
72061
72062 #endif
72063 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
72064 start_cpu_timer(cpu);
72065 #endif
72066 #ifdef CONFIG_PROC_FS
72067 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
72068 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
72069 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
72070 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
72071 + {
72072 + mode_t gr_mode = S_IRUGO;
72073 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72074 + gr_mode = S_IRUSR;
72075 +#endif
72076 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
72077 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
72078 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72079 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
72080 +#else
72081 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
72082 +#endif
72083 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
72084 + }
72085 #endif
72086 return 0;
72087 }
72088 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
72089 index 5471628..cef8398 100644
72090 --- a/net/8021q/vlan.c
72091 +++ b/net/8021q/vlan.c
72092 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
72093 err = -EPERM;
72094 if (!capable(CAP_NET_ADMIN))
72095 break;
72096 - if ((args.u.name_type >= 0) &&
72097 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
72098 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
72099 struct vlan_net *vn;
72100
72101 vn = net_generic(net, vlan_net_id);
72102 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
72103 index fdfdb57..38d368c 100644
72104 --- a/net/9p/trans_fd.c
72105 +++ b/net/9p/trans_fd.c
72106 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
72107 oldfs = get_fs();
72108 set_fs(get_ds());
72109 /* The cast to a user pointer is valid due to the set_fs() */
72110 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
72111 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
72112 set_fs(oldfs);
72113
72114 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
72115 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
72116 index f41f026..fe76ea8 100644
72117 --- a/net/atm/atm_misc.c
72118 +++ b/net/atm/atm_misc.c
72119 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
72120 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
72121 return 1;
72122 atm_return(vcc, truesize);
72123 - atomic_inc(&vcc->stats->rx_drop);
72124 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72125 return 0;
72126 }
72127 EXPORT_SYMBOL(atm_charge);
72128 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
72129 }
72130 }
72131 atm_return(vcc, guess);
72132 - atomic_inc(&vcc->stats->rx_drop);
72133 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72134 return NULL;
72135 }
72136 EXPORT_SYMBOL(atm_alloc_charge);
72137 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
72138
72139 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72140 {
72141 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72142 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72143 __SONET_ITEMS
72144 #undef __HANDLE_ITEM
72145 }
72146 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
72147
72148 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72149 {
72150 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72151 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
72152 __SONET_ITEMS
72153 #undef __HANDLE_ITEM
72154 }
72155 diff --git a/net/atm/lec.h b/net/atm/lec.h
72156 index dfc0719..47c5322 100644
72157 --- a/net/atm/lec.h
72158 +++ b/net/atm/lec.h
72159 @@ -48,7 +48,7 @@ struct lane2_ops {
72160 const u8 *tlvs, u32 sizeoftlvs);
72161 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
72162 const u8 *tlvs, u32 sizeoftlvs);
72163 -};
72164 +} __no_const;
72165
72166 /*
72167 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
72168 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
72169 index 0919a88..a23d54e 100644
72170 --- a/net/atm/mpc.h
72171 +++ b/net/atm/mpc.h
72172 @@ -33,7 +33,7 @@ struct mpoa_client {
72173 struct mpc_parameters parameters; /* parameters for this client */
72174
72175 const struct net_device_ops *old_ops;
72176 - struct net_device_ops new_ops;
72177 + net_device_ops_no_const new_ops;
72178 };
72179
72180
72181 diff --git a/net/atm/proc.c b/net/atm/proc.c
72182 index 0d020de..011c7bb 100644
72183 --- a/net/atm/proc.c
72184 +++ b/net/atm/proc.c
72185 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
72186 const struct k_atm_aal_stats *stats)
72187 {
72188 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
72189 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
72190 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
72191 - atomic_read(&stats->rx_drop));
72192 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
72193 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
72194 + atomic_read_unchecked(&stats->rx_drop));
72195 }
72196
72197 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
72198 diff --git a/net/atm/resources.c b/net/atm/resources.c
72199 index 23f45ce..c748f1a 100644
72200 --- a/net/atm/resources.c
72201 +++ b/net/atm/resources.c
72202 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
72203 static void copy_aal_stats(struct k_atm_aal_stats *from,
72204 struct atm_aal_stats *to)
72205 {
72206 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72207 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72208 __AAL_STAT_ITEMS
72209 #undef __HANDLE_ITEM
72210 }
72211 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
72212 static void subtract_aal_stats(struct k_atm_aal_stats *from,
72213 struct atm_aal_stats *to)
72214 {
72215 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72216 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
72217 __AAL_STAT_ITEMS
72218 #undef __HANDLE_ITEM
72219 }
72220 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
72221 index 3512e25..2b33401 100644
72222 --- a/net/batman-adv/bat_iv_ogm.c
72223 +++ b/net/batman-adv/bat_iv_ogm.c
72224 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72225
72226 /* change sequence number to network order */
72227 batman_ogm_packet->seqno =
72228 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
72229 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
72230
72231 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
72232 batman_ogm_packet->tt_crc = htons((uint16_t)
72233 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72234 else
72235 batman_ogm_packet->gw_flags = NO_FLAGS;
72236
72237 - atomic_inc(&hard_iface->seqno);
72238 + atomic_inc_unchecked(&hard_iface->seqno);
72239
72240 slide_own_bcast_window(hard_iface);
72241 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
72242 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
72243 return;
72244
72245 /* could be changed by schedule_own_packet() */
72246 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
72247 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
72248
72249 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
72250
72251 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
72252 index 7704df4..beb4e16 100644
72253 --- a/net/batman-adv/hard-interface.c
72254 +++ b/net/batman-adv/hard-interface.c
72255 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
72256 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
72257 dev_add_pack(&hard_iface->batman_adv_ptype);
72258
72259 - atomic_set(&hard_iface->seqno, 1);
72260 - atomic_set(&hard_iface->frag_seqno, 1);
72261 + atomic_set_unchecked(&hard_iface->seqno, 1);
72262 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72263 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72264 hard_iface->net_dev->name);
72265
72266 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72267 index f9cc957..efd9dae 100644
72268 --- a/net/batman-adv/soft-interface.c
72269 +++ b/net/batman-adv/soft-interface.c
72270 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72271
72272 /* set broadcast sequence number */
72273 bcast_packet->seqno =
72274 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72275 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72276
72277 add_bcast_packet_to_list(bat_priv, skb, 1);
72278
72279 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72280 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72281
72282 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72283 - atomic_set(&bat_priv->bcast_seqno, 1);
72284 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72285 atomic_set(&bat_priv->ttvn, 0);
72286 atomic_set(&bat_priv->tt_local_changes, 0);
72287 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72288 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72289 index ab8d0fe..ceba3fd 100644
72290 --- a/net/batman-adv/types.h
72291 +++ b/net/batman-adv/types.h
72292 @@ -38,8 +38,8 @@ struct hard_iface {
72293 int16_t if_num;
72294 char if_status;
72295 struct net_device *net_dev;
72296 - atomic_t seqno;
72297 - atomic_t frag_seqno;
72298 + atomic_unchecked_t seqno;
72299 + atomic_unchecked_t frag_seqno;
72300 unsigned char *packet_buff;
72301 int packet_len;
72302 struct kobject *hardif_obj;
72303 @@ -154,7 +154,7 @@ struct bat_priv {
72304 atomic_t orig_interval; /* uint */
72305 atomic_t hop_penalty; /* uint */
72306 atomic_t log_level; /* uint */
72307 - atomic_t bcast_seqno;
72308 + atomic_unchecked_t bcast_seqno;
72309 atomic_t bcast_queue_left;
72310 atomic_t batman_queue_left;
72311 atomic_t ttvn; /* translation table version number */
72312 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72313 index 07d1c1d..7e9bea9 100644
72314 --- a/net/batman-adv/unicast.c
72315 +++ b/net/batman-adv/unicast.c
72316 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72317 frag1->flags = UNI_FRAG_HEAD | large_tail;
72318 frag2->flags = large_tail;
72319
72320 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72321 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72322 frag1->seqno = htons(seqno - 1);
72323 frag2->seqno = htons(seqno);
72324
72325 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72326 index c1c597e..05ebb40 100644
72327 --- a/net/bluetooth/hci_conn.c
72328 +++ b/net/bluetooth/hci_conn.c
72329 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72330 memset(&cp, 0, sizeof(cp));
72331
72332 cp.handle = cpu_to_le16(conn->handle);
72333 - memcpy(cp.ltk, ltk, sizeof(ltk));
72334 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72335
72336 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72337 }
72338 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72339 index 17b5b1c..826d872 100644
72340 --- a/net/bluetooth/l2cap_core.c
72341 +++ b/net/bluetooth/l2cap_core.c
72342 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72343 break;
72344
72345 case L2CAP_CONF_RFC:
72346 - if (olen == sizeof(rfc))
72347 - memcpy(&rfc, (void *)val, olen);
72348 + if (olen != sizeof(rfc))
72349 + break;
72350 +
72351 + memcpy(&rfc, (void *)val, olen);
72352
72353 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72354 rfc.mode != chan->mode)
72355 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72356
72357 switch (type) {
72358 case L2CAP_CONF_RFC:
72359 - if (olen == sizeof(rfc))
72360 - memcpy(&rfc, (void *)val, olen);
72361 + if (olen != sizeof(rfc))
72362 + break;
72363 +
72364 + memcpy(&rfc, (void *)val, olen);
72365 goto done;
72366 }
72367 }
72368 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72369 index a5f4e57..910ee6d 100644
72370 --- a/net/bridge/br_multicast.c
72371 +++ b/net/bridge/br_multicast.c
72372 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72373 nexthdr = ip6h->nexthdr;
72374 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72375
72376 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72377 + if (nexthdr != IPPROTO_ICMPV6)
72378 return 0;
72379
72380 /* Okay, we found ICMPv6 header */
72381 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72382 index 5864cc4..121f3a3 100644
72383 --- a/net/bridge/netfilter/ebtables.c
72384 +++ b/net/bridge/netfilter/ebtables.c
72385 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72386 tmp.valid_hooks = t->table->valid_hooks;
72387 }
72388 mutex_unlock(&ebt_mutex);
72389 - if (copy_to_user(user, &tmp, *len) != 0){
72390 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72391 BUGPRINT("c2u Didn't work\n");
72392 ret = -EFAULT;
72393 break;
72394 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72395 index a986280..13444a1 100644
72396 --- a/net/caif/caif_socket.c
72397 +++ b/net/caif/caif_socket.c
72398 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72399 #ifdef CONFIG_DEBUG_FS
72400 struct debug_fs_counter {
72401 atomic_t caif_nr_socks;
72402 - atomic_t caif_sock_create;
72403 - atomic_t num_connect_req;
72404 - atomic_t num_connect_resp;
72405 - atomic_t num_connect_fail_resp;
72406 - atomic_t num_disconnect;
72407 - atomic_t num_remote_shutdown_ind;
72408 - atomic_t num_tx_flow_off_ind;
72409 - atomic_t num_tx_flow_on_ind;
72410 - atomic_t num_rx_flow_off;
72411 - atomic_t num_rx_flow_on;
72412 + atomic_unchecked_t caif_sock_create;
72413 + atomic_unchecked_t num_connect_req;
72414 + atomic_unchecked_t num_connect_resp;
72415 + atomic_unchecked_t num_connect_fail_resp;
72416 + atomic_unchecked_t num_disconnect;
72417 + atomic_unchecked_t num_remote_shutdown_ind;
72418 + atomic_unchecked_t num_tx_flow_off_ind;
72419 + atomic_unchecked_t num_tx_flow_on_ind;
72420 + atomic_unchecked_t num_rx_flow_off;
72421 + atomic_unchecked_t num_rx_flow_on;
72422 };
72423 static struct debug_fs_counter cnt;
72424 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72425 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72426 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72427 #else
72428 #define dbfs_atomic_inc(v) 0
72429 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72430 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72431 sk_rcvbuf_lowwater(cf_sk));
72432 set_rx_flow_off(cf_sk);
72433 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72434 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72435 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72436 }
72437
72438 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72439 set_rx_flow_off(cf_sk);
72440 if (net_ratelimit())
72441 pr_debug("sending flow OFF due to rmem_schedule\n");
72442 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72443 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72444 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72445 }
72446 skb->dev = NULL;
72447 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72448 switch (flow) {
72449 case CAIF_CTRLCMD_FLOW_ON_IND:
72450 /* OK from modem to start sending again */
72451 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72452 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72453 set_tx_flow_on(cf_sk);
72454 cf_sk->sk.sk_state_change(&cf_sk->sk);
72455 break;
72456
72457 case CAIF_CTRLCMD_FLOW_OFF_IND:
72458 /* Modem asks us to shut up */
72459 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72460 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72461 set_tx_flow_off(cf_sk);
72462 cf_sk->sk.sk_state_change(&cf_sk->sk);
72463 break;
72464 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72465 /* We're now connected */
72466 caif_client_register_refcnt(&cf_sk->layer,
72467 cfsk_hold, cfsk_put);
72468 - dbfs_atomic_inc(&cnt.num_connect_resp);
72469 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72470 cf_sk->sk.sk_state = CAIF_CONNECTED;
72471 set_tx_flow_on(cf_sk);
72472 cf_sk->sk.sk_state_change(&cf_sk->sk);
72473 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72474
72475 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72476 /* Connect request failed */
72477 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72478 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72479 cf_sk->sk.sk_err = ECONNREFUSED;
72480 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72481 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72482 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72483
72484 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72485 /* Modem has closed this connection, or device is down. */
72486 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72487 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72488 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72489 cf_sk->sk.sk_err = ECONNRESET;
72490 set_rx_flow_on(cf_sk);
72491 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72492 return;
72493
72494 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72495 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72496 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72497 set_rx_flow_on(cf_sk);
72498 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72499 }
72500 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72501 /*ifindex = id of the interface.*/
72502 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72503
72504 - dbfs_atomic_inc(&cnt.num_connect_req);
72505 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72506 cf_sk->layer.receive = caif_sktrecv_cb;
72507
72508 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72509 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72510 spin_unlock_bh(&sk->sk_receive_queue.lock);
72511 sock->sk = NULL;
72512
72513 - dbfs_atomic_inc(&cnt.num_disconnect);
72514 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72515
72516 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72517 if (cf_sk->debugfs_socket_dir != NULL)
72518 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72519 cf_sk->conn_req.protocol = protocol;
72520 /* Increase the number of sockets created. */
72521 dbfs_atomic_inc(&cnt.caif_nr_socks);
72522 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72523 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72524 #ifdef CONFIG_DEBUG_FS
72525 if (!IS_ERR(debugfsdir)) {
72526
72527 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72528 index 5cf5222..6f704ad 100644
72529 --- a/net/caif/cfctrl.c
72530 +++ b/net/caif/cfctrl.c
72531 @@ -9,6 +9,7 @@
72532 #include <linux/stddef.h>
72533 #include <linux/spinlock.h>
72534 #include <linux/slab.h>
72535 +#include <linux/sched.h>
72536 #include <net/caif/caif_layer.h>
72537 #include <net/caif/cfpkt.h>
72538 #include <net/caif/cfctrl.h>
72539 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72540 memset(&dev_info, 0, sizeof(dev_info));
72541 dev_info.id = 0xff;
72542 cfsrvl_init(&this->serv, 0, &dev_info, false);
72543 - atomic_set(&this->req_seq_no, 1);
72544 - atomic_set(&this->rsp_seq_no, 1);
72545 + atomic_set_unchecked(&this->req_seq_no, 1);
72546 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72547 this->serv.layer.receive = cfctrl_recv;
72548 sprintf(this->serv.layer.name, "ctrl");
72549 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72550 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72551 struct cfctrl_request_info *req)
72552 {
72553 spin_lock_bh(&ctrl->info_list_lock);
72554 - atomic_inc(&ctrl->req_seq_no);
72555 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72556 + atomic_inc_unchecked(&ctrl->req_seq_no);
72557 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72558 list_add_tail(&req->list, &ctrl->list);
72559 spin_unlock_bh(&ctrl->info_list_lock);
72560 }
72561 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72562 if (p != first)
72563 pr_warn("Requests are not received in order\n");
72564
72565 - atomic_set(&ctrl->rsp_seq_no,
72566 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72567 p->sequence_no);
72568 list_del(&p->list);
72569 goto out;
72570 diff --git a/net/can/gw.c b/net/can/gw.c
72571 index 3d79b12..8de85fa 100644
72572 --- a/net/can/gw.c
72573 +++ b/net/can/gw.c
72574 @@ -96,7 +96,7 @@ struct cf_mod {
72575 struct {
72576 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72577 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72578 - } csumfunc;
72579 + } __no_const csumfunc;
72580 };
72581
72582
72583 diff --git a/net/compat.c b/net/compat.c
72584 index 6def90e..c6992fa 100644
72585 --- a/net/compat.c
72586 +++ b/net/compat.c
72587 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72588 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72589 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72590 return -EFAULT;
72591 - kmsg->msg_name = compat_ptr(tmp1);
72592 - kmsg->msg_iov = compat_ptr(tmp2);
72593 - kmsg->msg_control = compat_ptr(tmp3);
72594 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72595 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72596 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72597 return 0;
72598 }
72599
72600 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72601
72602 if (kern_msg->msg_namelen) {
72603 if (mode == VERIFY_READ) {
72604 - int err = move_addr_to_kernel(kern_msg->msg_name,
72605 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72606 kern_msg->msg_namelen,
72607 kern_address);
72608 if (err < 0)
72609 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72610 kern_msg->msg_name = NULL;
72611
72612 tot_len = iov_from_user_compat_to_kern(kern_iov,
72613 - (struct compat_iovec __user *)kern_msg->msg_iov,
72614 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72615 kern_msg->msg_iovlen);
72616 if (tot_len >= 0)
72617 kern_msg->msg_iov = kern_iov;
72618 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72619
72620 #define CMSG_COMPAT_FIRSTHDR(msg) \
72621 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72622 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72623 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72624 (struct compat_cmsghdr __user *)NULL)
72625
72626 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72627 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72628 (ucmlen) <= (unsigned long) \
72629 ((mhdr)->msg_controllen - \
72630 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72631 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72632
72633 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72634 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72635 {
72636 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72637 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72638 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72639 msg->msg_controllen)
72640 return NULL;
72641 return (struct compat_cmsghdr __user *)ptr;
72642 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72643 {
72644 struct compat_timeval ctv;
72645 struct compat_timespec cts[3];
72646 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72647 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72648 struct compat_cmsghdr cmhdr;
72649 int cmlen;
72650
72651 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72652
72653 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72654 {
72655 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72656 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72657 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72658 int fdnum = scm->fp->count;
72659 struct file **fp = scm->fp->fp;
72660 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72661 return -EFAULT;
72662 old_fs = get_fs();
72663 set_fs(KERNEL_DS);
72664 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72665 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72666 set_fs(old_fs);
72667
72668 return err;
72669 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72670 len = sizeof(ktime);
72671 old_fs = get_fs();
72672 set_fs(KERNEL_DS);
72673 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72674 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72675 set_fs(old_fs);
72676
72677 if (!err) {
72678 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72679 case MCAST_JOIN_GROUP:
72680 case MCAST_LEAVE_GROUP:
72681 {
72682 - struct compat_group_req __user *gr32 = (void *)optval;
72683 + struct compat_group_req __user *gr32 = (void __user *)optval;
72684 struct group_req __user *kgr =
72685 compat_alloc_user_space(sizeof(struct group_req));
72686 u32 interface;
72687 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72688 case MCAST_BLOCK_SOURCE:
72689 case MCAST_UNBLOCK_SOURCE:
72690 {
72691 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72692 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72693 struct group_source_req __user *kgsr = compat_alloc_user_space(
72694 sizeof(struct group_source_req));
72695 u32 interface;
72696 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72697 }
72698 case MCAST_MSFILTER:
72699 {
72700 - struct compat_group_filter __user *gf32 = (void *)optval;
72701 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72702 struct group_filter __user *kgf;
72703 u32 interface, fmode, numsrc;
72704
72705 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72706 char __user *optval, int __user *optlen,
72707 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72708 {
72709 - struct compat_group_filter __user *gf32 = (void *)optval;
72710 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72711 struct group_filter __user *kgf;
72712 int __user *koptlen;
72713 u32 interface, fmode, numsrc;
72714 diff --git a/net/core/datagram.c b/net/core/datagram.c
72715 index 68bbf9f..5ef0d12 100644
72716 --- a/net/core/datagram.c
72717 +++ b/net/core/datagram.c
72718 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72719 }
72720
72721 kfree_skb(skb);
72722 - atomic_inc(&sk->sk_drops);
72723 + atomic_inc_unchecked(&sk->sk_drops);
72724 sk_mem_reclaim_partial(sk);
72725
72726 return err;
72727 diff --git a/net/core/dev.c b/net/core/dev.c
72728 index 5a13edf..a6f2bd2 100644
72729 --- a/net/core/dev.c
72730 +++ b/net/core/dev.c
72731 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72732 if (no_module && capable(CAP_NET_ADMIN))
72733 no_module = request_module("netdev-%s", name);
72734 if (no_module && capable(CAP_SYS_MODULE)) {
72735 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72736 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72737 +#else
72738 if (!request_module("%s", name))
72739 pr_err("Loading kernel module for a network device "
72740 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72741 "instead\n", name);
72742 +#endif
72743 }
72744 }
72745 EXPORT_SYMBOL(dev_load);
72746 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72747 {
72748 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72749 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72750 - atomic_long_inc(&dev->rx_dropped);
72751 + atomic_long_inc_unchecked(&dev->rx_dropped);
72752 kfree_skb(skb);
72753 return NET_RX_DROP;
72754 }
72755 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72756 nf_reset(skb);
72757
72758 if (unlikely(!is_skb_forwardable(dev, skb))) {
72759 - atomic_long_inc(&dev->rx_dropped);
72760 + atomic_long_inc_unchecked(&dev->rx_dropped);
72761 kfree_skb(skb);
72762 return NET_RX_DROP;
72763 }
72764 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72765
72766 struct dev_gso_cb {
72767 void (*destructor)(struct sk_buff *skb);
72768 -};
72769 +} __no_const;
72770
72771 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72772
72773 @@ -2970,7 +2974,7 @@ enqueue:
72774
72775 local_irq_restore(flags);
72776
72777 - atomic_long_inc(&skb->dev->rx_dropped);
72778 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72779 kfree_skb(skb);
72780 return NET_RX_DROP;
72781 }
72782 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72783 }
72784 EXPORT_SYMBOL(netif_rx_ni);
72785
72786 -static void net_tx_action(struct softirq_action *h)
72787 +static void net_tx_action(void)
72788 {
72789 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72790
72791 @@ -3333,7 +3337,7 @@ ncls:
72792 if (pt_prev) {
72793 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72794 } else {
72795 - atomic_long_inc(&skb->dev->rx_dropped);
72796 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72797 kfree_skb(skb);
72798 /* Jamal, now you will not able to escape explaining
72799 * me how you were going to use this. :-)
72800 @@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72801 }
72802 EXPORT_SYMBOL(netif_napi_del);
72803
72804 -static void net_rx_action(struct softirq_action *h)
72805 +static void net_rx_action(void)
72806 {
72807 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72808 unsigned long time_limit = jiffies + 2;
72809 @@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72810 } else {
72811 netdev_stats_to_stats64(storage, &dev->stats);
72812 }
72813 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72814 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72815 return storage;
72816 }
72817 EXPORT_SYMBOL(dev_get_stats);
72818 diff --git a/net/core/flow.c b/net/core/flow.c
72819 index e318c7e..168b1d0 100644
72820 --- a/net/core/flow.c
72821 +++ b/net/core/flow.c
72822 @@ -61,7 +61,7 @@ struct flow_cache {
72823 struct timer_list rnd_timer;
72824 };
72825
72826 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72827 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72828 EXPORT_SYMBOL(flow_cache_genid);
72829 static struct flow_cache flow_cache_global;
72830 static struct kmem_cache *flow_cachep __read_mostly;
72831 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72832
72833 static int flow_entry_valid(struct flow_cache_entry *fle)
72834 {
72835 - if (atomic_read(&flow_cache_genid) != fle->genid)
72836 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72837 return 0;
72838 if (fle->object && !fle->object->ops->check(fle->object))
72839 return 0;
72840 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72841 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72842 fcp->hash_count++;
72843 }
72844 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72845 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72846 flo = fle->object;
72847 if (!flo)
72848 goto ret_object;
72849 @@ -280,7 +280,7 @@ nocache:
72850 }
72851 flo = resolver(net, key, family, dir, flo, ctx);
72852 if (fle) {
72853 - fle->genid = atomic_read(&flow_cache_genid);
72854 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72855 if (!IS_ERR(flo))
72856 fle->object = flo;
72857 else
72858 diff --git a/net/core/iovec.c b/net/core/iovec.c
72859 index c40f27e..7f49254 100644
72860 --- a/net/core/iovec.c
72861 +++ b/net/core/iovec.c
72862 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72863 if (m->msg_namelen) {
72864 if (mode == VERIFY_READ) {
72865 void __user *namep;
72866 - namep = (void __user __force *) m->msg_name;
72867 + namep = (void __force_user *) m->msg_name;
72868 err = move_addr_to_kernel(namep, m->msg_namelen,
72869 address);
72870 if (err < 0)
72871 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72872 }
72873
72874 size = m->msg_iovlen * sizeof(struct iovec);
72875 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72876 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72877 return -EFAULT;
72878
72879 m->msg_iov = iov;
72880 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72881 index 9083e82..1673203 100644
72882 --- a/net/core/rtnetlink.c
72883 +++ b/net/core/rtnetlink.c
72884 @@ -57,7 +57,7 @@ struct rtnl_link {
72885 rtnl_doit_func doit;
72886 rtnl_dumpit_func dumpit;
72887 rtnl_calcit_func calcit;
72888 -};
72889 +} __no_const;
72890
72891 static DEFINE_MUTEX(rtnl_mutex);
72892 static u16 min_ifinfo_dump_size;
72893 diff --git a/net/core/scm.c b/net/core/scm.c
72894 index ff52ad0..aff1c0f 100644
72895 --- a/net/core/scm.c
72896 +++ b/net/core/scm.c
72897 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72898 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72899 {
72900 struct cmsghdr __user *cm
72901 - = (__force struct cmsghdr __user *)msg->msg_control;
72902 + = (struct cmsghdr __force_user *)msg->msg_control;
72903 struct cmsghdr cmhdr;
72904 int cmlen = CMSG_LEN(len);
72905 int err;
72906 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72907 err = -EFAULT;
72908 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72909 goto out;
72910 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72911 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72912 goto out;
72913 cmlen = CMSG_SPACE(len);
72914 if (msg->msg_controllen < cmlen)
72915 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72916 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72917 {
72918 struct cmsghdr __user *cm
72919 - = (__force struct cmsghdr __user*)msg->msg_control;
72920 + = (struct cmsghdr __force_user *)msg->msg_control;
72921
72922 int fdmax = 0;
72923 int fdnum = scm->fp->count;
72924 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72925 if (fdnum < fdmax)
72926 fdmax = fdnum;
72927
72928 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72929 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72930 i++, cmfptr++)
72931 {
72932 int new_fd;
72933 diff --git a/net/core/sock.c b/net/core/sock.c
72934 index b23f174..b9a0d26 100644
72935 --- a/net/core/sock.c
72936 +++ b/net/core/sock.c
72937 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72938 struct sk_buff_head *list = &sk->sk_receive_queue;
72939
72940 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72941 - atomic_inc(&sk->sk_drops);
72942 + atomic_inc_unchecked(&sk->sk_drops);
72943 trace_sock_rcvqueue_full(sk, skb);
72944 return -ENOMEM;
72945 }
72946 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72947 return err;
72948
72949 if (!sk_rmem_schedule(sk, skb->truesize)) {
72950 - atomic_inc(&sk->sk_drops);
72951 + atomic_inc_unchecked(&sk->sk_drops);
72952 return -ENOBUFS;
72953 }
72954
72955 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72956 skb_dst_force(skb);
72957
72958 spin_lock_irqsave(&list->lock, flags);
72959 - skb->dropcount = atomic_read(&sk->sk_drops);
72960 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72961 __skb_queue_tail(list, skb);
72962 spin_unlock_irqrestore(&list->lock, flags);
72963
72964 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72965 skb->dev = NULL;
72966
72967 if (sk_rcvqueues_full(sk, skb)) {
72968 - atomic_inc(&sk->sk_drops);
72969 + atomic_inc_unchecked(&sk->sk_drops);
72970 goto discard_and_relse;
72971 }
72972 if (nested)
72973 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72974 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72975 } else if (sk_add_backlog(sk, skb)) {
72976 bh_unlock_sock(sk);
72977 - atomic_inc(&sk->sk_drops);
72978 + atomic_inc_unchecked(&sk->sk_drops);
72979 goto discard_and_relse;
72980 }
72981
72982 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72983 if (len > sizeof(peercred))
72984 len = sizeof(peercred);
72985 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72986 - if (copy_to_user(optval, &peercred, len))
72987 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72988 return -EFAULT;
72989 goto lenout;
72990 }
72991 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72992 return -ENOTCONN;
72993 if (lv < len)
72994 return -EINVAL;
72995 - if (copy_to_user(optval, address, len))
72996 + if (len > sizeof(address) || copy_to_user(optval, address, len))
72997 return -EFAULT;
72998 goto lenout;
72999 }
73000 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
73001
73002 if (len > lv)
73003 len = lv;
73004 - if (copy_to_user(optval, &v, len))
73005 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
73006 return -EFAULT;
73007 lenout:
73008 if (put_user(len, optlen))
73009 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
73010 */
73011 smp_wmb();
73012 atomic_set(&sk->sk_refcnt, 1);
73013 - atomic_set(&sk->sk_drops, 0);
73014 + atomic_set_unchecked(&sk->sk_drops, 0);
73015 }
73016 EXPORT_SYMBOL(sock_init_data);
73017
73018 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
73019 index 02e75d1..9a57a7c 100644
73020 --- a/net/decnet/sysctl_net_decnet.c
73021 +++ b/net/decnet/sysctl_net_decnet.c
73022 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
73023
73024 if (len > *lenp) len = *lenp;
73025
73026 - if (copy_to_user(buffer, addr, len))
73027 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
73028 return -EFAULT;
73029
73030 *lenp = len;
73031 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
73032
73033 if (len > *lenp) len = *lenp;
73034
73035 - if (copy_to_user(buffer, devname, len))
73036 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
73037 return -EFAULT;
73038
73039 *lenp = len;
73040 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
73041 index 39a2d29..f39c0fe 100644
73042 --- a/net/econet/Kconfig
73043 +++ b/net/econet/Kconfig
73044 @@ -4,7 +4,7 @@
73045
73046 config ECONET
73047 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
73048 - depends on EXPERIMENTAL && INET
73049 + depends on EXPERIMENTAL && INET && BROKEN
73050 ---help---
73051 Econet is a fairly old and slow networking protocol mainly used by
73052 Acorn computers to access file and print servers. It uses native
73053 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
73054 index 92fc5f6..b790d91 100644
73055 --- a/net/ipv4/fib_frontend.c
73056 +++ b/net/ipv4/fib_frontend.c
73057 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
73058 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73059 fib_sync_up(dev);
73060 #endif
73061 - atomic_inc(&net->ipv4.dev_addr_genid);
73062 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73063 rt_cache_flush(dev_net(dev), -1);
73064 break;
73065 case NETDEV_DOWN:
73066 fib_del_ifaddr(ifa, NULL);
73067 - atomic_inc(&net->ipv4.dev_addr_genid);
73068 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73069 if (ifa->ifa_dev->ifa_list == NULL) {
73070 /* Last address was deleted from this interface.
73071 * Disable IP.
73072 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
73073 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73074 fib_sync_up(dev);
73075 #endif
73076 - atomic_inc(&net->ipv4.dev_addr_genid);
73077 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73078 rt_cache_flush(dev_net(dev), -1);
73079 break;
73080 case NETDEV_DOWN:
73081 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
73082 index 80106d8..232e898 100644
73083 --- a/net/ipv4/fib_semantics.c
73084 +++ b/net/ipv4/fib_semantics.c
73085 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
73086 nh->nh_saddr = inet_select_addr(nh->nh_dev,
73087 nh->nh_gw,
73088 nh->nh_parent->fib_scope);
73089 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
73090 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
73091
73092 return nh->nh_saddr;
73093 }
73094 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
73095 index ccee270..db23c3c 100644
73096 --- a/net/ipv4/inet_diag.c
73097 +++ b/net/ipv4/inet_diag.c
73098 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
73099 r->idiag_retrans = 0;
73100
73101 r->id.idiag_if = sk->sk_bound_dev_if;
73102 +
73103 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73104 + r->id.idiag_cookie[0] = 0;
73105 + r->id.idiag_cookie[1] = 0;
73106 +#else
73107 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
73108 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
73109 +#endif
73110
73111 r->id.idiag_sport = inet->inet_sport;
73112 r->id.idiag_dport = inet->inet_dport;
73113 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
73114 r->idiag_family = tw->tw_family;
73115 r->idiag_retrans = 0;
73116 r->id.idiag_if = tw->tw_bound_dev_if;
73117 +
73118 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73119 + r->id.idiag_cookie[0] = 0;
73120 + r->id.idiag_cookie[1] = 0;
73121 +#else
73122 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
73123 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
73124 +#endif
73125 +
73126 r->id.idiag_sport = tw->tw_sport;
73127 r->id.idiag_dport = tw->tw_dport;
73128 r->id.idiag_src[0] = tw->tw_rcv_saddr;
73129 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
73130 if (sk == NULL)
73131 goto unlock;
73132
73133 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73134 err = -ESTALE;
73135 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
73136 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
73137 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
73138 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
73139 goto out;
73140 +#endif
73141
73142 err = -ENOMEM;
73143 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
73144 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
73145 r->idiag_retrans = req->retrans;
73146
73147 r->id.idiag_if = sk->sk_bound_dev_if;
73148 +
73149 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73150 + r->id.idiag_cookie[0] = 0;
73151 + r->id.idiag_cookie[1] = 0;
73152 +#else
73153 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
73154 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
73155 +#endif
73156
73157 tmo = req->expires - jiffies;
73158 if (tmo < 0)
73159 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
73160 index 984ec65..97ac518 100644
73161 --- a/net/ipv4/inet_hashtables.c
73162 +++ b/net/ipv4/inet_hashtables.c
73163 @@ -18,12 +18,15 @@
73164 #include <linux/sched.h>
73165 #include <linux/slab.h>
73166 #include <linux/wait.h>
73167 +#include <linux/security.h>
73168
73169 #include <net/inet_connection_sock.h>
73170 #include <net/inet_hashtables.h>
73171 #include <net/secure_seq.h>
73172 #include <net/ip.h>
73173
73174 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
73175 +
73176 /*
73177 * Allocate and initialize a new local port bind bucket.
73178 * The bindhash mutex for snum's hash chain must be held here.
73179 @@ -530,6 +533,8 @@ ok:
73180 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
73181 spin_unlock(&head->lock);
73182
73183 + gr_update_task_in_ip_table(current, inet_sk(sk));
73184 +
73185 if (tw) {
73186 inet_twsk_deschedule(tw, death_row);
73187 while (twrefcnt) {
73188 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
73189 index 86f13c67..59a35b5 100644
73190 --- a/net/ipv4/inetpeer.c
73191 +++ b/net/ipv4/inetpeer.c
73192 @@ -436,8 +436,8 @@ relookup:
73193 if (p) {
73194 p->daddr = *daddr;
73195 atomic_set(&p->refcnt, 1);
73196 - atomic_set(&p->rid, 0);
73197 - atomic_set(&p->ip_id_count,
73198 + atomic_set_unchecked(&p->rid, 0);
73199 + atomic_set_unchecked(&p->ip_id_count,
73200 (daddr->family == AF_INET) ?
73201 secure_ip_id(daddr->addr.a4) :
73202 secure_ipv6_id(daddr->addr.a6));
73203 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
73204 index fdaabf2..0ec3205 100644
73205 --- a/net/ipv4/ip_fragment.c
73206 +++ b/net/ipv4/ip_fragment.c
73207 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
73208 return 0;
73209
73210 start = qp->rid;
73211 - end = atomic_inc_return(&peer->rid);
73212 + end = atomic_inc_return_unchecked(&peer->rid);
73213 qp->rid = end;
73214
73215 rc = qp->q.fragments && (end - start) > max;
73216 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
73217 index 09ff51b..d3968eb 100644
73218 --- a/net/ipv4/ip_sockglue.c
73219 +++ b/net/ipv4/ip_sockglue.c
73220 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73221 len = min_t(unsigned int, len, opt->optlen);
73222 if (put_user(len, optlen))
73223 return -EFAULT;
73224 - if (copy_to_user(optval, opt->__data, len))
73225 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
73226 + copy_to_user(optval, opt->__data, len))
73227 return -EFAULT;
73228 return 0;
73229 }
73230 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73231 if (sk->sk_type != SOCK_STREAM)
73232 return -ENOPROTOOPT;
73233
73234 - msg.msg_control = optval;
73235 + msg.msg_control = (void __force_kernel *)optval;
73236 msg.msg_controllen = len;
73237 msg.msg_flags = flags;
73238
73239 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
73240 index 99ec116..c5628fe 100644
73241 --- a/net/ipv4/ipconfig.c
73242 +++ b/net/ipv4/ipconfig.c
73243 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
73244
73245 mm_segment_t oldfs = get_fs();
73246 set_fs(get_ds());
73247 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73248 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73249 set_fs(oldfs);
73250 return res;
73251 }
73252 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
73253
73254 mm_segment_t oldfs = get_fs();
73255 set_fs(get_ds());
73256 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73257 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73258 set_fs(oldfs);
73259 return res;
73260 }
73261 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
73262
73263 mm_segment_t oldfs = get_fs();
73264 set_fs(get_ds());
73265 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73266 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73267 set_fs(oldfs);
73268 return res;
73269 }
73270 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73271 index 2133c30..5c4b40b 100644
73272 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73273 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73274 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73275
73276 *len = 0;
73277
73278 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73279 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73280 if (*octets == NULL)
73281 return 0;
73282
73283 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73284 index 43d4c3b..1914409 100644
73285 --- a/net/ipv4/ping.c
73286 +++ b/net/ipv4/ping.c
73287 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73288 sk_rmem_alloc_get(sp),
73289 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73290 atomic_read(&sp->sk_refcnt), sp,
73291 - atomic_read(&sp->sk_drops), len);
73292 + atomic_read_unchecked(&sp->sk_drops), len);
73293 }
73294
73295 static int ping_seq_show(struct seq_file *seq, void *v)
73296 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73297 index 007e2eb..85a18a0 100644
73298 --- a/net/ipv4/raw.c
73299 +++ b/net/ipv4/raw.c
73300 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73301 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73302 {
73303 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73304 - atomic_inc(&sk->sk_drops);
73305 + atomic_inc_unchecked(&sk->sk_drops);
73306 kfree_skb(skb);
73307 return NET_RX_DROP;
73308 }
73309 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73310
73311 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73312 {
73313 + struct icmp_filter filter;
73314 +
73315 if (optlen > sizeof(struct icmp_filter))
73316 optlen = sizeof(struct icmp_filter);
73317 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73318 + if (copy_from_user(&filter, optval, optlen))
73319 return -EFAULT;
73320 + raw_sk(sk)->filter = filter;
73321 return 0;
73322 }
73323
73324 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73325 {
73326 int len, ret = -EFAULT;
73327 + struct icmp_filter filter;
73328
73329 if (get_user(len, optlen))
73330 goto out;
73331 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73332 if (len > sizeof(struct icmp_filter))
73333 len = sizeof(struct icmp_filter);
73334 ret = -EFAULT;
73335 - if (put_user(len, optlen) ||
73336 - copy_to_user(optval, &raw_sk(sk)->filter, len))
73337 + filter = raw_sk(sk)->filter;
73338 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73339 goto out;
73340 ret = 0;
73341 out: return ret;
73342 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73343 sk_wmem_alloc_get(sp),
73344 sk_rmem_alloc_get(sp),
73345 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73346 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73347 + atomic_read(&sp->sk_refcnt),
73348 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73349 + NULL,
73350 +#else
73351 + sp,
73352 +#endif
73353 + atomic_read_unchecked(&sp->sk_drops));
73354 }
73355
73356 static int raw_seq_show(struct seq_file *seq, void *v)
73357 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73358 index 94cdbc5..0cb0063 100644
73359 --- a/net/ipv4/route.c
73360 +++ b/net/ipv4/route.c
73361 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73362
73363 static inline int rt_genid(struct net *net)
73364 {
73365 - return atomic_read(&net->ipv4.rt_genid);
73366 + return atomic_read_unchecked(&net->ipv4.rt_genid);
73367 }
73368
73369 #ifdef CONFIG_PROC_FS
73370 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73371 unsigned char shuffle;
73372
73373 get_random_bytes(&shuffle, sizeof(shuffle));
73374 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73375 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73376 redirect_genid++;
73377 }
73378
73379 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73380 error = rt->dst.error;
73381 if (peer) {
73382 inet_peer_refcheck(rt->peer);
73383 - id = atomic_read(&peer->ip_id_count) & 0xffff;
73384 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73385 if (peer->tcp_ts_stamp) {
73386 ts = peer->tcp_ts;
73387 tsage = get_seconds() - peer->tcp_ts_stamp;
73388 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73389 index c89e354..8bd55c8 100644
73390 --- a/net/ipv4/tcp_ipv4.c
73391 +++ b/net/ipv4/tcp_ipv4.c
73392 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73393 int sysctl_tcp_low_latency __read_mostly;
73394 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73395
73396 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73397 +extern int grsec_enable_blackhole;
73398 +#endif
73399
73400 #ifdef CONFIG_TCP_MD5SIG
73401 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73402 @@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73403 return 0;
73404
73405 reset:
73406 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73407 + if (!grsec_enable_blackhole)
73408 +#endif
73409 tcp_v4_send_reset(rsk, skb);
73410 discard:
73411 kfree_skb(skb);
73412 @@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73413 TCP_SKB_CB(skb)->sacked = 0;
73414
73415 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73416 - if (!sk)
73417 + if (!sk) {
73418 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73419 + ret = 1;
73420 +#endif
73421 goto no_tcp_socket;
73422 -
73423 + }
73424 process:
73425 - if (sk->sk_state == TCP_TIME_WAIT)
73426 + if (sk->sk_state == TCP_TIME_WAIT) {
73427 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73428 + ret = 2;
73429 +#endif
73430 goto do_time_wait;
73431 + }
73432
73433 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73434 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73435 @@ -1744,6 +1757,10 @@ no_tcp_socket:
73436 bad_packet:
73437 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73438 } else {
73439 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73440 + if (!grsec_enable_blackhole || (ret == 1 &&
73441 + (skb->dev->flags & IFF_LOOPBACK)))
73442 +#endif
73443 tcp_v4_send_reset(NULL, skb);
73444 }
73445
73446 @@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73447 0, /* non standard timer */
73448 0, /* open_requests have no inode */
73449 atomic_read(&sk->sk_refcnt),
73450 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73451 + NULL,
73452 +#else
73453 req,
73454 +#endif
73455 len);
73456 }
73457
73458 @@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73459 sock_i_uid(sk),
73460 icsk->icsk_probes_out,
73461 sock_i_ino(sk),
73462 - atomic_read(&sk->sk_refcnt), sk,
73463 + atomic_read(&sk->sk_refcnt),
73464 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73465 + NULL,
73466 +#else
73467 + sk,
73468 +#endif
73469 jiffies_to_clock_t(icsk->icsk_rto),
73470 jiffies_to_clock_t(icsk->icsk_ack.ato),
73471 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73472 @@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73473 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73474 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73475 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73476 - atomic_read(&tw->tw_refcnt), tw, len);
73477 + atomic_read(&tw->tw_refcnt),
73478 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73479 + NULL,
73480 +#else
73481 + tw,
73482 +#endif
73483 + len);
73484 }
73485
73486 #define TMPSZ 150
73487 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73488 index 66363b6..b0654a3 100644
73489 --- a/net/ipv4/tcp_minisocks.c
73490 +++ b/net/ipv4/tcp_minisocks.c
73491 @@ -27,6 +27,10 @@
73492 #include <net/inet_common.h>
73493 #include <net/xfrm.h>
73494
73495 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73496 +extern int grsec_enable_blackhole;
73497 +#endif
73498 +
73499 int sysctl_tcp_syncookies __read_mostly = 1;
73500 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73501
73502 @@ -751,6 +755,10 @@ listen_overflow:
73503
73504 embryonic_reset:
73505 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73506 +
73507 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73508 + if (!grsec_enable_blackhole)
73509 +#endif
73510 if (!(flg & TCP_FLAG_RST))
73511 req->rsk_ops->send_reset(sk, skb);
73512
73513 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73514 index 85ee7eb..53277ab 100644
73515 --- a/net/ipv4/tcp_probe.c
73516 +++ b/net/ipv4/tcp_probe.c
73517 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73518 if (cnt + width >= len)
73519 break;
73520
73521 - if (copy_to_user(buf + cnt, tbuf, width))
73522 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73523 return -EFAULT;
73524 cnt += width;
73525 }
73526 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73527 index 2e0f0af..e2948bf 100644
73528 --- a/net/ipv4/tcp_timer.c
73529 +++ b/net/ipv4/tcp_timer.c
73530 @@ -22,6 +22,10 @@
73531 #include <linux/gfp.h>
73532 #include <net/tcp.h>
73533
73534 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73535 +extern int grsec_lastack_retries;
73536 +#endif
73537 +
73538 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73539 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73540 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73541 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73542 }
73543 }
73544
73545 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73546 + if ((sk->sk_state == TCP_LAST_ACK) &&
73547 + (grsec_lastack_retries > 0) &&
73548 + (grsec_lastack_retries < retry_until))
73549 + retry_until = grsec_lastack_retries;
73550 +#endif
73551 +
73552 if (retransmits_timed_out(sk, retry_until,
73553 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73554 /* Has it gone just too far? */
73555 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73556 index 5a65eea..bd913a1 100644
73557 --- a/net/ipv4/udp.c
73558 +++ b/net/ipv4/udp.c
73559 @@ -86,6 +86,7 @@
73560 #include <linux/types.h>
73561 #include <linux/fcntl.h>
73562 #include <linux/module.h>
73563 +#include <linux/security.h>
73564 #include <linux/socket.h>
73565 #include <linux/sockios.h>
73566 #include <linux/igmp.h>
73567 @@ -108,6 +109,10 @@
73568 #include <trace/events/udp.h>
73569 #include "udp_impl.h"
73570
73571 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73572 +extern int grsec_enable_blackhole;
73573 +#endif
73574 +
73575 struct udp_table udp_table __read_mostly;
73576 EXPORT_SYMBOL(udp_table);
73577
73578 @@ -565,6 +570,9 @@ found:
73579 return s;
73580 }
73581
73582 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73583 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73584 +
73585 /*
73586 * This routine is called by the ICMP module when it gets some
73587 * sort of error condition. If err < 0 then the socket should
73588 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73589 dport = usin->sin_port;
73590 if (dport == 0)
73591 return -EINVAL;
73592 +
73593 + err = gr_search_udp_sendmsg(sk, usin);
73594 + if (err)
73595 + return err;
73596 } else {
73597 if (sk->sk_state != TCP_ESTABLISHED)
73598 return -EDESTADDRREQ;
73599 +
73600 + err = gr_search_udp_sendmsg(sk, NULL);
73601 + if (err)
73602 + return err;
73603 +
73604 daddr = inet->inet_daddr;
73605 dport = inet->inet_dport;
73606 /* Open fast path for connected socket.
73607 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73608 udp_lib_checksum_complete(skb)) {
73609 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73610 IS_UDPLITE(sk));
73611 - atomic_inc(&sk->sk_drops);
73612 + atomic_inc_unchecked(&sk->sk_drops);
73613 __skb_unlink(skb, rcvq);
73614 __skb_queue_tail(&list_kill, skb);
73615 }
73616 @@ -1185,6 +1202,10 @@ try_again:
73617 if (!skb)
73618 goto out;
73619
73620 + err = gr_search_udp_recvmsg(sk, skb);
73621 + if (err)
73622 + goto out_free;
73623 +
73624 ulen = skb->len - sizeof(struct udphdr);
73625 copied = len;
73626 if (copied > ulen)
73627 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73628
73629 drop:
73630 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73631 - atomic_inc(&sk->sk_drops);
73632 + atomic_inc_unchecked(&sk->sk_drops);
73633 kfree_skb(skb);
73634 return -1;
73635 }
73636 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73637 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73638
73639 if (!skb1) {
73640 - atomic_inc(&sk->sk_drops);
73641 + atomic_inc_unchecked(&sk->sk_drops);
73642 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73643 IS_UDPLITE(sk));
73644 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73645 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73646 goto csum_error;
73647
73648 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73649 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73650 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73651 +#endif
73652 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73653
73654 /*
73655 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73656 sk_wmem_alloc_get(sp),
73657 sk_rmem_alloc_get(sp),
73658 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73659 - atomic_read(&sp->sk_refcnt), sp,
73660 - atomic_read(&sp->sk_drops), len);
73661 + atomic_read(&sp->sk_refcnt),
73662 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73663 + NULL,
73664 +#else
73665 + sp,
73666 +#endif
73667 + atomic_read_unchecked(&sp->sk_drops), len);
73668 }
73669
73670 int udp4_seq_show(struct seq_file *seq, void *v)
73671 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73672 index 836c4ea..cbb74dc 100644
73673 --- a/net/ipv6/addrconf.c
73674 +++ b/net/ipv6/addrconf.c
73675 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73676 p.iph.ihl = 5;
73677 p.iph.protocol = IPPROTO_IPV6;
73678 p.iph.ttl = 64;
73679 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73680 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73681
73682 if (ops->ndo_do_ioctl) {
73683 mm_segment_t oldfs = get_fs();
73684 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73685 index 1567fb1..29af910 100644
73686 --- a/net/ipv6/inet6_connection_sock.c
73687 +++ b/net/ipv6/inet6_connection_sock.c
73688 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73689 #ifdef CONFIG_XFRM
73690 {
73691 struct rt6_info *rt = (struct rt6_info *)dst;
73692 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73693 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73694 }
73695 #endif
73696 }
73697 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73698 #ifdef CONFIG_XFRM
73699 if (dst) {
73700 struct rt6_info *rt = (struct rt6_info *)dst;
73701 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73702 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73703 __sk_dst_reset(sk);
73704 dst = NULL;
73705 }
73706 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73707 index 26cb08c..8af9877 100644
73708 --- a/net/ipv6/ipv6_sockglue.c
73709 +++ b/net/ipv6/ipv6_sockglue.c
73710 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73711 if (sk->sk_type != SOCK_STREAM)
73712 return -ENOPROTOOPT;
73713
73714 - msg.msg_control = optval;
73715 + msg.msg_control = (void __force_kernel *)optval;
73716 msg.msg_controllen = len;
73717 msg.msg_flags = flags;
73718
73719 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73720 index 361ebf3..d5628fb 100644
73721 --- a/net/ipv6/raw.c
73722 +++ b/net/ipv6/raw.c
73723 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73724 {
73725 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73726 skb_checksum_complete(skb)) {
73727 - atomic_inc(&sk->sk_drops);
73728 + atomic_inc_unchecked(&sk->sk_drops);
73729 kfree_skb(skb);
73730 return NET_RX_DROP;
73731 }
73732 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73733 struct raw6_sock *rp = raw6_sk(sk);
73734
73735 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73736 - atomic_inc(&sk->sk_drops);
73737 + atomic_inc_unchecked(&sk->sk_drops);
73738 kfree_skb(skb);
73739 return NET_RX_DROP;
73740 }
73741 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73742
73743 if (inet->hdrincl) {
73744 if (skb_checksum_complete(skb)) {
73745 - atomic_inc(&sk->sk_drops);
73746 + atomic_inc_unchecked(&sk->sk_drops);
73747 kfree_skb(skb);
73748 return NET_RX_DROP;
73749 }
73750 @@ -601,7 +601,7 @@ out:
73751 return err;
73752 }
73753
73754 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73755 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73756 struct flowi6 *fl6, struct dst_entry **dstp,
73757 unsigned int flags)
73758 {
73759 @@ -909,12 +909,15 @@ do_confirm:
73760 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73761 char __user *optval, int optlen)
73762 {
73763 + struct icmp6_filter filter;
73764 +
73765 switch (optname) {
73766 case ICMPV6_FILTER:
73767 if (optlen > sizeof(struct icmp6_filter))
73768 optlen = sizeof(struct icmp6_filter);
73769 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73770 + if (copy_from_user(&filter, optval, optlen))
73771 return -EFAULT;
73772 + raw6_sk(sk)->filter = filter;
73773 return 0;
73774 default:
73775 return -ENOPROTOOPT;
73776 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73777 char __user *optval, int __user *optlen)
73778 {
73779 int len;
73780 + struct icmp6_filter filter;
73781
73782 switch (optname) {
73783 case ICMPV6_FILTER:
73784 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73785 len = sizeof(struct icmp6_filter);
73786 if (put_user(len, optlen))
73787 return -EFAULT;
73788 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73789 + filter = raw6_sk(sk)->filter;
73790 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73791 return -EFAULT;
73792 return 0;
73793 default:
73794 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73795 0, 0L, 0,
73796 sock_i_uid(sp), 0,
73797 sock_i_ino(sp),
73798 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73799 + atomic_read(&sp->sk_refcnt),
73800 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73801 + NULL,
73802 +#else
73803 + sp,
73804 +#endif
73805 + atomic_read_unchecked(&sp->sk_drops));
73806 }
73807
73808 static int raw6_seq_show(struct seq_file *seq, void *v)
73809 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73810 index b859e4a..f9d1589 100644
73811 --- a/net/ipv6/tcp_ipv6.c
73812 +++ b/net/ipv6/tcp_ipv6.c
73813 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73814 }
73815 #endif
73816
73817 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73818 +extern int grsec_enable_blackhole;
73819 +#endif
73820 +
73821 static void tcp_v6_hash(struct sock *sk)
73822 {
73823 if (sk->sk_state != TCP_CLOSE) {
73824 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73825 return 0;
73826
73827 reset:
73828 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73829 + if (!grsec_enable_blackhole)
73830 +#endif
73831 tcp_v6_send_reset(sk, skb);
73832 discard:
73833 if (opt_skb)
73834 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73835 TCP_SKB_CB(skb)->sacked = 0;
73836
73837 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73838 - if (!sk)
73839 + if (!sk) {
73840 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73841 + ret = 1;
73842 +#endif
73843 goto no_tcp_socket;
73844 + }
73845
73846 process:
73847 - if (sk->sk_state == TCP_TIME_WAIT)
73848 + if (sk->sk_state == TCP_TIME_WAIT) {
73849 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73850 + ret = 2;
73851 +#endif
73852 goto do_time_wait;
73853 + }
73854
73855 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73856 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73857 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73858 bad_packet:
73859 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73860 } else {
73861 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73862 + if (!grsec_enable_blackhole || (ret == 1 &&
73863 + (skb->dev->flags & IFF_LOOPBACK)))
73864 +#endif
73865 tcp_v6_send_reset(NULL, skb);
73866 }
73867
73868 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73869 uid,
73870 0, /* non standard timer */
73871 0, /* open_requests have no inode */
73872 - 0, req);
73873 + 0,
73874 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73875 + NULL
73876 +#else
73877 + req
73878 +#endif
73879 + );
73880 }
73881
73882 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73883 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73884 sock_i_uid(sp),
73885 icsk->icsk_probes_out,
73886 sock_i_ino(sp),
73887 - atomic_read(&sp->sk_refcnt), sp,
73888 + atomic_read(&sp->sk_refcnt),
73889 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73890 + NULL,
73891 +#else
73892 + sp,
73893 +#endif
73894 jiffies_to_clock_t(icsk->icsk_rto),
73895 jiffies_to_clock_t(icsk->icsk_ack.ato),
73896 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73897 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73898 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73899 tw->tw_substate, 0, 0,
73900 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73901 - atomic_read(&tw->tw_refcnt), tw);
73902 + atomic_read(&tw->tw_refcnt),
73903 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73904 + NULL
73905 +#else
73906 + tw
73907 +#endif
73908 + );
73909 }
73910
73911 static int tcp6_seq_show(struct seq_file *seq, void *v)
73912 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73913 index 8c25419..47a51ae 100644
73914 --- a/net/ipv6/udp.c
73915 +++ b/net/ipv6/udp.c
73916 @@ -50,6 +50,10 @@
73917 #include <linux/seq_file.h>
73918 #include "udp_impl.h"
73919
73920 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73921 +extern int grsec_enable_blackhole;
73922 +#endif
73923 +
73924 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73925 {
73926 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73927 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73928
73929 return 0;
73930 drop:
73931 - atomic_inc(&sk->sk_drops);
73932 + atomic_inc_unchecked(&sk->sk_drops);
73933 drop_no_sk_drops_inc:
73934 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73935 kfree_skb(skb);
73936 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73937 continue;
73938 }
73939 drop:
73940 - atomic_inc(&sk->sk_drops);
73941 + atomic_inc_unchecked(&sk->sk_drops);
73942 UDP6_INC_STATS_BH(sock_net(sk),
73943 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73944 UDP6_INC_STATS_BH(sock_net(sk),
73945 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73946 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73947 proto == IPPROTO_UDPLITE);
73948
73949 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73950 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73951 +#endif
73952 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73953
73954 kfree_skb(skb);
73955 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73956 if (!sock_owned_by_user(sk))
73957 udpv6_queue_rcv_skb(sk, skb);
73958 else if (sk_add_backlog(sk, skb)) {
73959 - atomic_inc(&sk->sk_drops);
73960 + atomic_inc_unchecked(&sk->sk_drops);
73961 bh_unlock_sock(sk);
73962 sock_put(sk);
73963 goto discard;
73964 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73965 0, 0L, 0,
73966 sock_i_uid(sp), 0,
73967 sock_i_ino(sp),
73968 - atomic_read(&sp->sk_refcnt), sp,
73969 - atomic_read(&sp->sk_drops));
73970 + atomic_read(&sp->sk_refcnt),
73971 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73972 + NULL,
73973 +#else
73974 + sp,
73975 +#endif
73976 + atomic_read_unchecked(&sp->sk_drops));
73977 }
73978
73979 int udp6_seq_show(struct seq_file *seq, void *v)
73980 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73981 index 253695d..9481ce8 100644
73982 --- a/net/irda/ircomm/ircomm_tty.c
73983 +++ b/net/irda/ircomm/ircomm_tty.c
73984 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73985 add_wait_queue(&self->open_wait, &wait);
73986
73987 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73988 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73989 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73990
73991 /* As far as I can see, we protect open_count - Jean II */
73992 spin_lock_irqsave(&self->spinlock, flags);
73993 if (!tty_hung_up_p(filp)) {
73994 extra_count = 1;
73995 - self->open_count--;
73996 + local_dec(&self->open_count);
73997 }
73998 spin_unlock_irqrestore(&self->spinlock, flags);
73999 - self->blocked_open++;
74000 + local_inc(&self->blocked_open);
74001
74002 while (1) {
74003 if (tty->termios->c_cflag & CBAUD) {
74004 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74005 }
74006
74007 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
74008 - __FILE__,__LINE__, tty->driver->name, self->open_count );
74009 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
74010
74011 schedule();
74012 }
74013 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74014 if (extra_count) {
74015 /* ++ is not atomic, so this should be protected - Jean II */
74016 spin_lock_irqsave(&self->spinlock, flags);
74017 - self->open_count++;
74018 + local_inc(&self->open_count);
74019 spin_unlock_irqrestore(&self->spinlock, flags);
74020 }
74021 - self->blocked_open--;
74022 + local_dec(&self->blocked_open);
74023
74024 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
74025 - __FILE__,__LINE__, tty->driver->name, self->open_count);
74026 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
74027
74028 if (!retval)
74029 self->flags |= ASYNC_NORMAL_ACTIVE;
74030 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
74031 }
74032 /* ++ is not atomic, so this should be protected - Jean II */
74033 spin_lock_irqsave(&self->spinlock, flags);
74034 - self->open_count++;
74035 + local_inc(&self->open_count);
74036
74037 tty->driver_data = self;
74038 self->tty = tty;
74039 spin_unlock_irqrestore(&self->spinlock, flags);
74040
74041 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
74042 - self->line, self->open_count);
74043 + self->line, local_read(&self->open_count));
74044
74045 /* Not really used by us, but lets do it anyway */
74046 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
74047 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74048 return;
74049 }
74050
74051 - if ((tty->count == 1) && (self->open_count != 1)) {
74052 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
74053 /*
74054 * Uh, oh. tty->count is 1, which means that the tty
74055 * structure will be freed. state->count should always
74056 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74057 */
74058 IRDA_DEBUG(0, "%s(), bad serial port count; "
74059 "tty->count is 1, state->count is %d\n", __func__ ,
74060 - self->open_count);
74061 - self->open_count = 1;
74062 + local_read(&self->open_count));
74063 + local_set(&self->open_count, 1);
74064 }
74065
74066 - if (--self->open_count < 0) {
74067 + if (local_dec_return(&self->open_count) < 0) {
74068 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
74069 - __func__, self->line, self->open_count);
74070 - self->open_count = 0;
74071 + __func__, self->line, local_read(&self->open_count));
74072 + local_set(&self->open_count, 0);
74073 }
74074 - if (self->open_count) {
74075 + if (local_read(&self->open_count)) {
74076 spin_unlock_irqrestore(&self->spinlock, flags);
74077
74078 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
74079 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74080 tty->closing = 0;
74081 self->tty = NULL;
74082
74083 - if (self->blocked_open) {
74084 + if (local_read(&self->blocked_open)) {
74085 if (self->close_delay)
74086 schedule_timeout_interruptible(self->close_delay);
74087 wake_up_interruptible(&self->open_wait);
74088 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
74089 spin_lock_irqsave(&self->spinlock, flags);
74090 self->flags &= ~ASYNC_NORMAL_ACTIVE;
74091 self->tty = NULL;
74092 - self->open_count = 0;
74093 + local_set(&self->open_count, 0);
74094 spin_unlock_irqrestore(&self->spinlock, flags);
74095
74096 wake_up_interruptible(&self->open_wait);
74097 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
74098 seq_putc(m, '\n');
74099
74100 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
74101 - seq_printf(m, "Open count: %d\n", self->open_count);
74102 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
74103 seq_printf(m, "Max data size: %d\n", self->max_data_size);
74104 seq_printf(m, "Max header size: %d\n", self->max_header_size);
74105
74106 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
74107 index 274d150..656a144 100644
74108 --- a/net/iucv/af_iucv.c
74109 +++ b/net/iucv/af_iucv.c
74110 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
74111
74112 write_lock_bh(&iucv_sk_list.lock);
74113
74114 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
74115 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74116 while (__iucv_get_sock_by_name(name)) {
74117 sprintf(name, "%08x",
74118 - atomic_inc_return(&iucv_sk_list.autobind_name));
74119 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74120 }
74121
74122 write_unlock_bh(&iucv_sk_list.lock);
74123 diff --git a/net/key/af_key.c b/net/key/af_key.c
74124 index 1e733e9..3d73c9f 100644
74125 --- a/net/key/af_key.c
74126 +++ b/net/key/af_key.c
74127 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
74128 static u32 get_acqseq(void)
74129 {
74130 u32 res;
74131 - static atomic_t acqseq;
74132 + static atomic_unchecked_t acqseq;
74133
74134 do {
74135 - res = atomic_inc_return(&acqseq);
74136 + res = atomic_inc_return_unchecked(&acqseq);
74137 } while (!res);
74138 return res;
74139 }
74140 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
74141 index 73495f1..ad51356 100644
74142 --- a/net/mac80211/ieee80211_i.h
74143 +++ b/net/mac80211/ieee80211_i.h
74144 @@ -27,6 +27,7 @@
74145 #include <net/ieee80211_radiotap.h>
74146 #include <net/cfg80211.h>
74147 #include <net/mac80211.h>
74148 +#include <asm/local.h>
74149 #include "key.h"
74150 #include "sta_info.h"
74151
74152 @@ -764,7 +765,7 @@ struct ieee80211_local {
74153 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
74154 spinlock_t queue_stop_reason_lock;
74155
74156 - int open_count;
74157 + local_t open_count;
74158 int monitors, cooked_mntrs;
74159 /* number of interfaces with corresponding FIF_ flags */
74160 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
74161 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
74162 index 30d7355..e260095 100644
74163 --- a/net/mac80211/iface.c
74164 +++ b/net/mac80211/iface.c
74165 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74166 break;
74167 }
74168
74169 - if (local->open_count == 0) {
74170 + if (local_read(&local->open_count) == 0) {
74171 res = drv_start(local);
74172 if (res)
74173 goto err_del_bss;
74174 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74175 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
74176
74177 if (!is_valid_ether_addr(dev->dev_addr)) {
74178 - if (!local->open_count)
74179 + if (!local_read(&local->open_count))
74180 drv_stop(local);
74181 return -EADDRNOTAVAIL;
74182 }
74183 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74184 mutex_unlock(&local->mtx);
74185
74186 if (coming_up)
74187 - local->open_count++;
74188 + local_inc(&local->open_count);
74189
74190 if (hw_reconf_flags) {
74191 ieee80211_hw_config(local, hw_reconf_flags);
74192 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74193 err_del_interface:
74194 drv_remove_interface(local, &sdata->vif);
74195 err_stop:
74196 - if (!local->open_count)
74197 + if (!local_read(&local->open_count))
74198 drv_stop(local);
74199 err_del_bss:
74200 sdata->bss = NULL;
74201 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74202 }
74203
74204 if (going_down)
74205 - local->open_count--;
74206 + local_dec(&local->open_count);
74207
74208 switch (sdata->vif.type) {
74209 case NL80211_IFTYPE_AP_VLAN:
74210 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74211
74212 ieee80211_recalc_ps(local, -1);
74213
74214 - if (local->open_count == 0) {
74215 + if (local_read(&local->open_count) == 0) {
74216 if (local->ops->napi_poll)
74217 napi_disable(&local->napi);
74218 ieee80211_clear_tx_pending(local);
74219 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
74220 index a7536fd..4039cc0 100644
74221 --- a/net/mac80211/main.c
74222 +++ b/net/mac80211/main.c
74223 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
74224 local->hw.conf.power_level = power;
74225 }
74226
74227 - if (changed && local->open_count) {
74228 + if (changed && local_read(&local->open_count)) {
74229 ret = drv_config(local, changed);
74230 /*
74231 * Goal:
74232 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
74233 index 9ee7164..56c5061 100644
74234 --- a/net/mac80211/pm.c
74235 +++ b/net/mac80211/pm.c
74236 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74237 struct ieee80211_sub_if_data *sdata;
74238 struct sta_info *sta;
74239
74240 - if (!local->open_count)
74241 + if (!local_read(&local->open_count))
74242 goto suspend;
74243
74244 ieee80211_scan_cancel(local);
74245 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74246 cancel_work_sync(&local->dynamic_ps_enable_work);
74247 del_timer_sync(&local->dynamic_ps_timer);
74248
74249 - local->wowlan = wowlan && local->open_count;
74250 + local->wowlan = wowlan && local_read(&local->open_count);
74251 if (local->wowlan) {
74252 int err = drv_suspend(local, wowlan);
74253 if (err < 0) {
74254 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74255 }
74256
74257 /* stop hardware - this must stop RX */
74258 - if (local->open_count)
74259 + if (local_read(&local->open_count))
74260 ieee80211_stop_device(local);
74261
74262 suspend:
74263 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
74264 index 5a5a776..9600b11 100644
74265 --- a/net/mac80211/rate.c
74266 +++ b/net/mac80211/rate.c
74267 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74268
74269 ASSERT_RTNL();
74270
74271 - if (local->open_count)
74272 + if (local_read(&local->open_count))
74273 return -EBUSY;
74274
74275 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74276 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74277 index c97a065..ff61928 100644
74278 --- a/net/mac80211/rc80211_pid_debugfs.c
74279 +++ b/net/mac80211/rc80211_pid_debugfs.c
74280 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74281
74282 spin_unlock_irqrestore(&events->lock, status);
74283
74284 - if (copy_to_user(buf, pb, p))
74285 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74286 return -EFAULT;
74287
74288 return p;
74289 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74290 index d5230ec..c604b21 100644
74291 --- a/net/mac80211/util.c
74292 +++ b/net/mac80211/util.c
74293 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74294 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74295
74296 /* everything else happens only if HW was up & running */
74297 - if (!local->open_count)
74298 + if (!local_read(&local->open_count))
74299 goto wake_up;
74300
74301 /*
74302 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74303 index d5597b7..ab6d39c 100644
74304 --- a/net/netfilter/Kconfig
74305 +++ b/net/netfilter/Kconfig
74306 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74307
74308 To compile it as a module, choose M here. If unsure, say N.
74309
74310 +config NETFILTER_XT_MATCH_GRADM
74311 + tristate '"gradm" match support'
74312 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74313 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74314 + ---help---
74315 + The gradm match allows to match on grsecurity RBAC being enabled.
74316 + It is useful when iptables rules are applied early on bootup to
74317 + prevent connections to the machine (except from a trusted host)
74318 + while the RBAC system is disabled.
74319 +
74320 config NETFILTER_XT_MATCH_HASHLIMIT
74321 tristate '"hashlimit" match support'
74322 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74323 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74324 index 1a02853..5d8c22e 100644
74325 --- a/net/netfilter/Makefile
74326 +++ b/net/netfilter/Makefile
74327 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74328 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74329 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74330 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74331 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74332 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74333 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74334 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74335 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74336 index 29fa5ba..8debc79 100644
74337 --- a/net/netfilter/ipvs/ip_vs_conn.c
74338 +++ b/net/netfilter/ipvs/ip_vs_conn.c
74339 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74340 /* Increase the refcnt counter of the dest */
74341 atomic_inc(&dest->refcnt);
74342
74343 - conn_flags = atomic_read(&dest->conn_flags);
74344 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
74345 if (cp->protocol != IPPROTO_UDP)
74346 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74347 /* Bind with the destination and its corresponding transmitter */
74348 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74349 atomic_set(&cp->refcnt, 1);
74350
74351 atomic_set(&cp->n_control, 0);
74352 - atomic_set(&cp->in_pkts, 0);
74353 + atomic_set_unchecked(&cp->in_pkts, 0);
74354
74355 atomic_inc(&ipvs->conn_count);
74356 if (flags & IP_VS_CONN_F_NO_CPORT)
74357 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74358
74359 /* Don't drop the entry if its number of incoming packets is not
74360 located in [0, 8] */
74361 - i = atomic_read(&cp->in_pkts);
74362 + i = atomic_read_unchecked(&cp->in_pkts);
74363 if (i > 8 || i < 0) return 0;
74364
74365 if (!todrop_rate[i]) return 0;
74366 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74367 index 093cc32..9209ae1 100644
74368 --- a/net/netfilter/ipvs/ip_vs_core.c
74369 +++ b/net/netfilter/ipvs/ip_vs_core.c
74370 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74371 ret = cp->packet_xmit(skb, cp, pd->pp);
74372 /* do not touch skb anymore */
74373
74374 - atomic_inc(&cp->in_pkts);
74375 + atomic_inc_unchecked(&cp->in_pkts);
74376 ip_vs_conn_put(cp);
74377 return ret;
74378 }
74379 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74380 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74381 pkts = sysctl_sync_threshold(ipvs);
74382 else
74383 - pkts = atomic_add_return(1, &cp->in_pkts);
74384 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74385
74386 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74387 cp->protocol == IPPROTO_SCTP) {
74388 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74389 index e1a66cf..0910076 100644
74390 --- a/net/netfilter/ipvs/ip_vs_ctl.c
74391 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
74392 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74393 ip_vs_rs_hash(ipvs, dest);
74394 write_unlock_bh(&ipvs->rs_lock);
74395 }
74396 - atomic_set(&dest->conn_flags, conn_flags);
74397 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
74398
74399 /* bind the service */
74400 if (!dest->svc) {
74401 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74402 " %-7s %-6d %-10d %-10d\n",
74403 &dest->addr.in6,
74404 ntohs(dest->port),
74405 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74406 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74407 atomic_read(&dest->weight),
74408 atomic_read(&dest->activeconns),
74409 atomic_read(&dest->inactconns));
74410 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74411 "%-7s %-6d %-10d %-10d\n",
74412 ntohl(dest->addr.ip),
74413 ntohs(dest->port),
74414 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74415 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74416 atomic_read(&dest->weight),
74417 atomic_read(&dest->activeconns),
74418 atomic_read(&dest->inactconns));
74419 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74420
74421 entry.addr = dest->addr.ip;
74422 entry.port = dest->port;
74423 - entry.conn_flags = atomic_read(&dest->conn_flags);
74424 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74425 entry.weight = atomic_read(&dest->weight);
74426 entry.u_threshold = dest->u_threshold;
74427 entry.l_threshold = dest->l_threshold;
74428 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74429 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74430
74431 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74432 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74433 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74434 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74435 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74436 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74437 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74438 index 2b6678c0..aaa41fc 100644
74439 --- a/net/netfilter/ipvs/ip_vs_sync.c
74440 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74441 @@ -649,7 +649,7 @@ control:
74442 * i.e only increment in_pkts for Templates.
74443 */
74444 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74445 - int pkts = atomic_add_return(1, &cp->in_pkts);
74446 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74447
74448 if (pkts % sysctl_sync_period(ipvs) != 1)
74449 return;
74450 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74451
74452 if (opt)
74453 memcpy(&cp->in_seq, opt, sizeof(*opt));
74454 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74455 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74456 cp->state = state;
74457 cp->old_state = cp->state;
74458 /*
74459 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74460 index aa2d720..d8aa111 100644
74461 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74462 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74463 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74464 else
74465 rc = NF_ACCEPT;
74466 /* do not touch skb anymore */
74467 - atomic_inc(&cp->in_pkts);
74468 + atomic_inc_unchecked(&cp->in_pkts);
74469 goto out;
74470 }
74471
74472 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74473 else
74474 rc = NF_ACCEPT;
74475 /* do not touch skb anymore */
74476 - atomic_inc(&cp->in_pkts);
74477 + atomic_inc_unchecked(&cp->in_pkts);
74478 goto out;
74479 }
74480
74481 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74482 index 66b2c54..c7884e3 100644
74483 --- a/net/netfilter/nfnetlink_log.c
74484 +++ b/net/netfilter/nfnetlink_log.c
74485 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74486 };
74487
74488 static DEFINE_SPINLOCK(instances_lock);
74489 -static atomic_t global_seq;
74490 +static atomic_unchecked_t global_seq;
74491
74492 #define INSTANCE_BUCKETS 16
74493 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74494 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74495 /* global sequence number */
74496 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74497 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74498 - htonl(atomic_inc_return(&global_seq)));
74499 + htonl(atomic_inc_return_unchecked(&global_seq)));
74500
74501 if (data_len) {
74502 struct nlattr *nla;
74503 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74504 new file mode 100644
74505 index 0000000..6905327
74506 --- /dev/null
74507 +++ b/net/netfilter/xt_gradm.c
74508 @@ -0,0 +1,51 @@
74509 +/*
74510 + * gradm match for netfilter
74511 + * Copyright © Zbigniew Krzystolik, 2010
74512 + *
74513 + * This program is free software; you can redistribute it and/or modify
74514 + * it under the terms of the GNU General Public License; either version
74515 + * 2 or 3 as published by the Free Software Foundation.
74516 + */
74517 +#include <linux/module.h>
74518 +#include <linux/moduleparam.h>
74519 +#include <linux/skbuff.h>
74520 +#include <linux/netfilter/x_tables.h>
74521 +#include <linux/grsecurity.h>
74522 +#include <linux/netfilter/xt_gradm.h>
74523 +
74524 +static bool
74525 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74526 +{
74527 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74528 + bool retval = false;
74529 + if (gr_acl_is_enabled())
74530 + retval = true;
74531 + return retval ^ info->invflags;
74532 +}
74533 +
74534 +static struct xt_match gradm_mt_reg __read_mostly = {
74535 + .name = "gradm",
74536 + .revision = 0,
74537 + .family = NFPROTO_UNSPEC,
74538 + .match = gradm_mt,
74539 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74540 + .me = THIS_MODULE,
74541 +};
74542 +
74543 +static int __init gradm_mt_init(void)
74544 +{
74545 + return xt_register_match(&gradm_mt_reg);
74546 +}
74547 +
74548 +static void __exit gradm_mt_exit(void)
74549 +{
74550 + xt_unregister_match(&gradm_mt_reg);
74551 +}
74552 +
74553 +module_init(gradm_mt_init);
74554 +module_exit(gradm_mt_exit);
74555 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74556 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74557 +MODULE_LICENSE("GPL");
74558 +MODULE_ALIAS("ipt_gradm");
74559 +MODULE_ALIAS("ip6t_gradm");
74560 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74561 index 4fe4fb4..87a89e5 100644
74562 --- a/net/netfilter/xt_statistic.c
74563 +++ b/net/netfilter/xt_statistic.c
74564 @@ -19,7 +19,7 @@
74565 #include <linux/module.h>
74566
74567 struct xt_statistic_priv {
74568 - atomic_t count;
74569 + atomic_unchecked_t count;
74570 } ____cacheline_aligned_in_smp;
74571
74572 MODULE_LICENSE("GPL");
74573 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74574 break;
74575 case XT_STATISTIC_MODE_NTH:
74576 do {
74577 - oval = atomic_read(&info->master->count);
74578 + oval = atomic_read_unchecked(&info->master->count);
74579 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74580 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74581 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74582 if (nval == 0)
74583 ret = !ret;
74584 break;
74585 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74586 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74587 if (info->master == NULL)
74588 return -ENOMEM;
74589 - atomic_set(&info->master->count, info->u.nth.count);
74590 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74591
74592 return 0;
74593 }
74594 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74595 index 1201b6d..bcff8c6 100644
74596 --- a/net/netlink/af_netlink.c
74597 +++ b/net/netlink/af_netlink.c
74598 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74599 sk->sk_error_report(sk);
74600 }
74601 }
74602 - atomic_inc(&sk->sk_drops);
74603 + atomic_inc_unchecked(&sk->sk_drops);
74604 }
74605
74606 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74607 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74608 sk_wmem_alloc_get(s),
74609 nlk->cb,
74610 atomic_read(&s->sk_refcnt),
74611 - atomic_read(&s->sk_drops),
74612 + atomic_read_unchecked(&s->sk_drops),
74613 sock_i_ino(s)
74614 );
74615
74616 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74617 index 732152f..60bb09e 100644
74618 --- a/net/netrom/af_netrom.c
74619 +++ b/net/netrom/af_netrom.c
74620 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74621 struct sock *sk = sock->sk;
74622 struct nr_sock *nr = nr_sk(sk);
74623
74624 + memset(sax, 0, sizeof(*sax));
74625 lock_sock(sk);
74626 if (peer != 0) {
74627 if (sk->sk_state != TCP_ESTABLISHED) {
74628 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74629 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74630 } else {
74631 sax->fsa_ax25.sax25_family = AF_NETROM;
74632 - sax->fsa_ax25.sax25_ndigis = 0;
74633 sax->fsa_ax25.sax25_call = nr->source_addr;
74634 *uaddr_len = sizeof(struct sockaddr_ax25);
74635 }
74636 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74637 index d9d4970..d5a6a68 100644
74638 --- a/net/packet/af_packet.c
74639 +++ b/net/packet/af_packet.c
74640 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74641
74642 spin_lock(&sk->sk_receive_queue.lock);
74643 po->stats.tp_packets++;
74644 - skb->dropcount = atomic_read(&sk->sk_drops);
74645 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74646 __skb_queue_tail(&sk->sk_receive_queue, skb);
74647 spin_unlock(&sk->sk_receive_queue.lock);
74648 sk->sk_data_ready(sk, skb->len);
74649 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74650 drop_n_acct:
74651 spin_lock(&sk->sk_receive_queue.lock);
74652 po->stats.tp_drops++;
74653 - atomic_inc(&sk->sk_drops);
74654 + atomic_inc_unchecked(&sk->sk_drops);
74655 spin_unlock(&sk->sk_receive_queue.lock);
74656
74657 drop_n_restore:
74658 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74659 case PACKET_HDRLEN:
74660 if (len > sizeof(int))
74661 len = sizeof(int);
74662 - if (copy_from_user(&val, optval, len))
74663 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74664 return -EFAULT;
74665 switch (val) {
74666 case TPACKET_V1:
74667 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74668
74669 if (put_user(len, optlen))
74670 return -EFAULT;
74671 - if (copy_to_user(optval, data, len))
74672 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74673 return -EFAULT;
74674 return 0;
74675 }
74676 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74677 index d65f699..05aa6ce 100644
74678 --- a/net/phonet/af_phonet.c
74679 +++ b/net/phonet/af_phonet.c
74680 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74681 {
74682 struct phonet_protocol *pp;
74683
74684 - if (protocol >= PHONET_NPROTO)
74685 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74686 return NULL;
74687
74688 rcu_read_lock();
74689 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74690 {
74691 int err = 0;
74692
74693 - if (protocol >= PHONET_NPROTO)
74694 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74695 return -EINVAL;
74696
74697 err = proto_register(pp->prot, 1);
74698 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74699 index 2ba6e9f..409573f 100644
74700 --- a/net/phonet/pep.c
74701 +++ b/net/phonet/pep.c
74702 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74703
74704 case PNS_PEP_CTRL_REQ:
74705 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74706 - atomic_inc(&sk->sk_drops);
74707 + atomic_inc_unchecked(&sk->sk_drops);
74708 break;
74709 }
74710 __skb_pull(skb, 4);
74711 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74712 }
74713
74714 if (pn->rx_credits == 0) {
74715 - atomic_inc(&sk->sk_drops);
74716 + atomic_inc_unchecked(&sk->sk_drops);
74717 err = -ENOBUFS;
74718 break;
74719 }
74720 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74721 }
74722
74723 if (pn->rx_credits == 0) {
74724 - atomic_inc(&sk->sk_drops);
74725 + atomic_inc_unchecked(&sk->sk_drops);
74726 err = NET_RX_DROP;
74727 break;
74728 }
74729 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74730 index 4c7eff3..59c727f 100644
74731 --- a/net/phonet/socket.c
74732 +++ b/net/phonet/socket.c
74733 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74734 pn->resource, sk->sk_state,
74735 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74736 sock_i_uid(sk), sock_i_ino(sk),
74737 - atomic_read(&sk->sk_refcnt), sk,
74738 - atomic_read(&sk->sk_drops), &len);
74739 + atomic_read(&sk->sk_refcnt),
74740 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74741 + NULL,
74742 +#else
74743 + sk,
74744 +#endif
74745 + atomic_read_unchecked(&sk->sk_drops), &len);
74746 }
74747 seq_printf(seq, "%*s\n", 127 - len, "");
74748 return 0;
74749 diff --git a/net/rds/cong.c b/net/rds/cong.c
74750 index e5b65ac..f3b6fb7 100644
74751 --- a/net/rds/cong.c
74752 +++ b/net/rds/cong.c
74753 @@ -78,7 +78,7 @@
74754 * finds that the saved generation number is smaller than the global generation
74755 * number, it wakes up the process.
74756 */
74757 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74758 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74759
74760 /*
74761 * Congestion monitoring
74762 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74763 rdsdebug("waking map %p for %pI4\n",
74764 map, &map->m_addr);
74765 rds_stats_inc(s_cong_update_received);
74766 - atomic_inc(&rds_cong_generation);
74767 + atomic_inc_unchecked(&rds_cong_generation);
74768 if (waitqueue_active(&map->m_waitq))
74769 wake_up(&map->m_waitq);
74770 if (waitqueue_active(&rds_poll_waitq))
74771 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74772
74773 int rds_cong_updated_since(unsigned long *recent)
74774 {
74775 - unsigned long gen = atomic_read(&rds_cong_generation);
74776 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74777
74778 if (likely(*recent == gen))
74779 return 0;
74780 diff --git a/net/rds/ib.h b/net/rds/ib.h
74781 index edfaaaf..8c89879 100644
74782 --- a/net/rds/ib.h
74783 +++ b/net/rds/ib.h
74784 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74785 /* sending acks */
74786 unsigned long i_ack_flags;
74787 #ifdef KERNEL_HAS_ATOMIC64
74788 - atomic64_t i_ack_next; /* next ACK to send */
74789 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74790 #else
74791 spinlock_t i_ack_lock; /* protect i_ack_next */
74792 u64 i_ack_next; /* next ACK to send */
74793 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74794 index 51c8689..36c555f 100644
74795 --- a/net/rds/ib_cm.c
74796 +++ b/net/rds/ib_cm.c
74797 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74798 /* Clear the ACK state */
74799 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74800 #ifdef KERNEL_HAS_ATOMIC64
74801 - atomic64_set(&ic->i_ack_next, 0);
74802 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74803 #else
74804 ic->i_ack_next = 0;
74805 #endif
74806 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74807 index e29e0ca..fa3a6a3 100644
74808 --- a/net/rds/ib_recv.c
74809 +++ b/net/rds/ib_recv.c
74810 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74811 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74812 int ack_required)
74813 {
74814 - atomic64_set(&ic->i_ack_next, seq);
74815 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74816 if (ack_required) {
74817 smp_mb__before_clear_bit();
74818 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74819 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74820 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74821 smp_mb__after_clear_bit();
74822
74823 - return atomic64_read(&ic->i_ack_next);
74824 + return atomic64_read_unchecked(&ic->i_ack_next);
74825 }
74826 #endif
74827
74828 diff --git a/net/rds/iw.h b/net/rds/iw.h
74829 index 04ce3b1..48119a6 100644
74830 --- a/net/rds/iw.h
74831 +++ b/net/rds/iw.h
74832 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74833 /* sending acks */
74834 unsigned long i_ack_flags;
74835 #ifdef KERNEL_HAS_ATOMIC64
74836 - atomic64_t i_ack_next; /* next ACK to send */
74837 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74838 #else
74839 spinlock_t i_ack_lock; /* protect i_ack_next */
74840 u64 i_ack_next; /* next ACK to send */
74841 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74842 index 9556d28..f046d0e 100644
74843 --- a/net/rds/iw_cm.c
74844 +++ b/net/rds/iw_cm.c
74845 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74846 /* Clear the ACK state */
74847 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74848 #ifdef KERNEL_HAS_ATOMIC64
74849 - atomic64_set(&ic->i_ack_next, 0);
74850 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74851 #else
74852 ic->i_ack_next = 0;
74853 #endif
74854 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74855 index 5e57347..3916042 100644
74856 --- a/net/rds/iw_recv.c
74857 +++ b/net/rds/iw_recv.c
74858 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74859 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74860 int ack_required)
74861 {
74862 - atomic64_set(&ic->i_ack_next, seq);
74863 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74864 if (ack_required) {
74865 smp_mb__before_clear_bit();
74866 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74867 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74868 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74869 smp_mb__after_clear_bit();
74870
74871 - return atomic64_read(&ic->i_ack_next);
74872 + return atomic64_read_unchecked(&ic->i_ack_next);
74873 }
74874 #endif
74875
74876 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74877 index edac9ef..16bcb98 100644
74878 --- a/net/rds/tcp.c
74879 +++ b/net/rds/tcp.c
74880 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74881 int val = 1;
74882
74883 set_fs(KERNEL_DS);
74884 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74885 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74886 sizeof(val));
74887 set_fs(oldfs);
74888 }
74889 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74890 index 1b4fd68..2234175 100644
74891 --- a/net/rds/tcp_send.c
74892 +++ b/net/rds/tcp_send.c
74893 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74894
74895 oldfs = get_fs();
74896 set_fs(KERNEL_DS);
74897 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74898 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74899 sizeof(val));
74900 set_fs(oldfs);
74901 }
74902 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74903 index 74c064c..fdec26f 100644
74904 --- a/net/rxrpc/af_rxrpc.c
74905 +++ b/net/rxrpc/af_rxrpc.c
74906 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74907 __be32 rxrpc_epoch;
74908
74909 /* current debugging ID */
74910 -atomic_t rxrpc_debug_id;
74911 +atomic_unchecked_t rxrpc_debug_id;
74912
74913 /* count of skbs currently in use */
74914 atomic_t rxrpc_n_skbs;
74915 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74916 index f99cfce..cc529dd 100644
74917 --- a/net/rxrpc/ar-ack.c
74918 +++ b/net/rxrpc/ar-ack.c
74919 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74920
74921 _enter("{%d,%d,%d,%d},",
74922 call->acks_hard, call->acks_unacked,
74923 - atomic_read(&call->sequence),
74924 + atomic_read_unchecked(&call->sequence),
74925 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74926
74927 stop = 0;
74928 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74929
74930 /* each Tx packet has a new serial number */
74931 sp->hdr.serial =
74932 - htonl(atomic_inc_return(&call->conn->serial));
74933 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74934
74935 hdr = (struct rxrpc_header *) txb->head;
74936 hdr->serial = sp->hdr.serial;
74937 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74938 */
74939 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74940 {
74941 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74942 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74943 }
74944
74945 /*
74946 @@ -629,7 +629,7 @@ process_further:
74947
74948 latest = ntohl(sp->hdr.serial);
74949 hard = ntohl(ack.firstPacket);
74950 - tx = atomic_read(&call->sequence);
74951 + tx = atomic_read_unchecked(&call->sequence);
74952
74953 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74954 latest,
74955 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74956 goto maybe_reschedule;
74957
74958 send_ACK_with_skew:
74959 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74960 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74961 ntohl(ack.serial));
74962 send_ACK:
74963 mtu = call->conn->trans->peer->if_mtu;
74964 @@ -1173,7 +1173,7 @@ send_ACK:
74965 ackinfo.rxMTU = htonl(5692);
74966 ackinfo.jumbo_max = htonl(4);
74967
74968 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74969 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74970 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74971 ntohl(hdr.serial),
74972 ntohs(ack.maxSkew),
74973 @@ -1191,7 +1191,7 @@ send_ACK:
74974 send_message:
74975 _debug("send message");
74976
74977 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74978 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74979 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74980 send_message_2:
74981
74982 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
74983 index bf656c2..48f9d27 100644
74984 --- a/net/rxrpc/ar-call.c
74985 +++ b/net/rxrpc/ar-call.c
74986 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
74987 spin_lock_init(&call->lock);
74988 rwlock_init(&call->state_lock);
74989 atomic_set(&call->usage, 1);
74990 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74991 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74992 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74993
74994 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74995 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
74996 index 4106ca9..a338d7a 100644
74997 --- a/net/rxrpc/ar-connection.c
74998 +++ b/net/rxrpc/ar-connection.c
74999 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
75000 rwlock_init(&conn->lock);
75001 spin_lock_init(&conn->state_lock);
75002 atomic_set(&conn->usage, 1);
75003 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
75004 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75005 conn->avail_calls = RXRPC_MAXCALLS;
75006 conn->size_align = 4;
75007 conn->header_size = sizeof(struct rxrpc_header);
75008 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
75009 index e7ed43a..6afa140 100644
75010 --- a/net/rxrpc/ar-connevent.c
75011 +++ b/net/rxrpc/ar-connevent.c
75012 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
75013
75014 len = iov[0].iov_len + iov[1].iov_len;
75015
75016 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75017 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75018 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
75019
75020 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75021 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
75022 index 1a2b0633..e8d1382 100644
75023 --- a/net/rxrpc/ar-input.c
75024 +++ b/net/rxrpc/ar-input.c
75025 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
75026 /* track the latest serial number on this connection for ACK packet
75027 * information */
75028 serial = ntohl(sp->hdr.serial);
75029 - hi_serial = atomic_read(&call->conn->hi_serial);
75030 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
75031 while (serial > hi_serial)
75032 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
75033 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
75034 serial);
75035
75036 /* request ACK generation for any ACK or DATA packet that requests
75037 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
75038 index 8e22bd3..f66d1c0 100644
75039 --- a/net/rxrpc/ar-internal.h
75040 +++ b/net/rxrpc/ar-internal.h
75041 @@ -272,8 +272,8 @@ struct rxrpc_connection {
75042 int error; /* error code for local abort */
75043 int debug_id; /* debug ID for printks */
75044 unsigned call_counter; /* call ID counter */
75045 - atomic_t serial; /* packet serial number counter */
75046 - atomic_t hi_serial; /* highest serial number received */
75047 + atomic_unchecked_t serial; /* packet serial number counter */
75048 + atomic_unchecked_t hi_serial; /* highest serial number received */
75049 u8 avail_calls; /* number of calls available */
75050 u8 size_align; /* data size alignment (for security) */
75051 u8 header_size; /* rxrpc + security header size */
75052 @@ -346,7 +346,7 @@ struct rxrpc_call {
75053 spinlock_t lock;
75054 rwlock_t state_lock; /* lock for state transition */
75055 atomic_t usage;
75056 - atomic_t sequence; /* Tx data packet sequence counter */
75057 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
75058 u32 abort_code; /* local/remote abort code */
75059 enum { /* current state of call */
75060 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
75061 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
75062 */
75063 extern atomic_t rxrpc_n_skbs;
75064 extern __be32 rxrpc_epoch;
75065 -extern atomic_t rxrpc_debug_id;
75066 +extern atomic_unchecked_t rxrpc_debug_id;
75067 extern struct workqueue_struct *rxrpc_workqueue;
75068
75069 /*
75070 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
75071 index 87f7135..74d3703 100644
75072 --- a/net/rxrpc/ar-local.c
75073 +++ b/net/rxrpc/ar-local.c
75074 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
75075 spin_lock_init(&local->lock);
75076 rwlock_init(&local->services_lock);
75077 atomic_set(&local->usage, 1);
75078 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
75079 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75080 memcpy(&local->srx, srx, sizeof(*srx));
75081 }
75082
75083 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
75084 index 338d793..47391d0 100644
75085 --- a/net/rxrpc/ar-output.c
75086 +++ b/net/rxrpc/ar-output.c
75087 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
75088 sp->hdr.cid = call->cid;
75089 sp->hdr.callNumber = call->call_id;
75090 sp->hdr.seq =
75091 - htonl(atomic_inc_return(&call->sequence));
75092 + htonl(atomic_inc_return_unchecked(&call->sequence));
75093 sp->hdr.serial =
75094 - htonl(atomic_inc_return(&conn->serial));
75095 + htonl(atomic_inc_return_unchecked(&conn->serial));
75096 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
75097 sp->hdr.userStatus = 0;
75098 sp->hdr.securityIndex = conn->security_ix;
75099 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
75100 index 2754f09..b20e38f 100644
75101 --- a/net/rxrpc/ar-peer.c
75102 +++ b/net/rxrpc/ar-peer.c
75103 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
75104 INIT_LIST_HEAD(&peer->error_targets);
75105 spin_lock_init(&peer->lock);
75106 atomic_set(&peer->usage, 1);
75107 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
75108 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75109 memcpy(&peer->srx, srx, sizeof(*srx));
75110
75111 rxrpc_assess_MTU_size(peer);
75112 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
75113 index 38047f7..9f48511 100644
75114 --- a/net/rxrpc/ar-proc.c
75115 +++ b/net/rxrpc/ar-proc.c
75116 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
75117 atomic_read(&conn->usage),
75118 rxrpc_conn_states[conn->state],
75119 key_serial(conn->key),
75120 - atomic_read(&conn->serial),
75121 - atomic_read(&conn->hi_serial));
75122 + atomic_read_unchecked(&conn->serial),
75123 + atomic_read_unchecked(&conn->hi_serial));
75124
75125 return 0;
75126 }
75127 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
75128 index 92df566..87ec1bf 100644
75129 --- a/net/rxrpc/ar-transport.c
75130 +++ b/net/rxrpc/ar-transport.c
75131 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
75132 spin_lock_init(&trans->client_lock);
75133 rwlock_init(&trans->conn_lock);
75134 atomic_set(&trans->usage, 1);
75135 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
75136 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75137
75138 if (peer->srx.transport.family == AF_INET) {
75139 switch (peer->srx.transport_type) {
75140 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
75141 index 7635107..4670276 100644
75142 --- a/net/rxrpc/rxkad.c
75143 +++ b/net/rxrpc/rxkad.c
75144 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
75145
75146 len = iov[0].iov_len + iov[1].iov_len;
75147
75148 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75149 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75150 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
75151
75152 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75153 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
75154
75155 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
75156
75157 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
75158 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75159 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
75160
75161 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
75162 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
75163 index 1e2eee8..ce3967e 100644
75164 --- a/net/sctp/proc.c
75165 +++ b/net/sctp/proc.c
75166 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
75167 seq_printf(seq,
75168 "%8pK %8pK %-3d %-3d %-2d %-4d "
75169 "%4d %8d %8d %7d %5lu %-5d %5d ",
75170 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
75171 + assoc, sk,
75172 + sctp_sk(sk)->type, sk->sk_state,
75173 assoc->state, hash,
75174 assoc->assoc_id,
75175 assoc->sndbuf_used,
75176 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
75177 index 54a7cd2..944edae 100644
75178 --- a/net/sctp/socket.c
75179 +++ b/net/sctp/socket.c
75180 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
75181 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
75182 if (space_left < addrlen)
75183 return -ENOMEM;
75184 - if (copy_to_user(to, &temp, addrlen))
75185 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
75186 return -EFAULT;
75187 to += addrlen;
75188 cnt++;
75189 diff --git a/net/socket.c b/net/socket.c
75190 index 2dce67a..1e91168 100644
75191 --- a/net/socket.c
75192 +++ b/net/socket.c
75193 @@ -88,6 +88,7 @@
75194 #include <linux/nsproxy.h>
75195 #include <linux/magic.h>
75196 #include <linux/slab.h>
75197 +#include <linux/in.h>
75198
75199 #include <asm/uaccess.h>
75200 #include <asm/unistd.h>
75201 @@ -105,6 +106,8 @@
75202 #include <linux/sockios.h>
75203 #include <linux/atalk.h>
75204
75205 +#include <linux/grsock.h>
75206 +
75207 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
75208 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
75209 unsigned long nr_segs, loff_t pos);
75210 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
75211 &sockfs_dentry_operations, SOCKFS_MAGIC);
75212 }
75213
75214 -static struct vfsmount *sock_mnt __read_mostly;
75215 +struct vfsmount *sock_mnt __read_mostly;
75216
75217 static struct file_system_type sock_fs_type = {
75218 .name = "sockfs",
75219 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
75220 return -EAFNOSUPPORT;
75221 if (type < 0 || type >= SOCK_MAX)
75222 return -EINVAL;
75223 + if (protocol < 0)
75224 + return -EINVAL;
75225
75226 /* Compatibility.
75227
75228 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
75229 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
75230 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
75231
75232 + if(!gr_search_socket(family, type, protocol)) {
75233 + retval = -EACCES;
75234 + goto out;
75235 + }
75236 +
75237 + if (gr_handle_sock_all(family, type, protocol)) {
75238 + retval = -EACCES;
75239 + goto out;
75240 + }
75241 +
75242 retval = sock_create(family, type, protocol, &sock);
75243 if (retval < 0)
75244 goto out;
75245 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75246 if (sock) {
75247 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
75248 if (err >= 0) {
75249 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
75250 + err = -EACCES;
75251 + goto error;
75252 + }
75253 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
75254 + if (err)
75255 + goto error;
75256 +
75257 err = security_socket_bind(sock,
75258 (struct sockaddr *)&address,
75259 addrlen);
75260 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75261 (struct sockaddr *)
75262 &address, addrlen);
75263 }
75264 +error:
75265 fput_light(sock->file, fput_needed);
75266 }
75267 return err;
75268 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
75269 if ((unsigned)backlog > somaxconn)
75270 backlog = somaxconn;
75271
75272 + if (gr_handle_sock_server_other(sock->sk)) {
75273 + err = -EPERM;
75274 + goto error;
75275 + }
75276 +
75277 + err = gr_search_listen(sock);
75278 + if (err)
75279 + goto error;
75280 +
75281 err = security_socket_listen(sock, backlog);
75282 if (!err)
75283 err = sock->ops->listen(sock, backlog);
75284
75285 +error:
75286 fput_light(sock->file, fput_needed);
75287 }
75288 return err;
75289 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75290 newsock->type = sock->type;
75291 newsock->ops = sock->ops;
75292
75293 + if (gr_handle_sock_server_other(sock->sk)) {
75294 + err = -EPERM;
75295 + sock_release(newsock);
75296 + goto out_put;
75297 + }
75298 +
75299 + err = gr_search_accept(sock);
75300 + if (err) {
75301 + sock_release(newsock);
75302 + goto out_put;
75303 + }
75304 +
75305 /*
75306 * We don't need try_module_get here, as the listening socket (sock)
75307 * has the protocol module (sock->ops->owner) held.
75308 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75309 fd_install(newfd, newfile);
75310 err = newfd;
75311
75312 + gr_attach_curr_ip(newsock->sk);
75313 +
75314 out_put:
75315 fput_light(sock->file, fput_needed);
75316 out:
75317 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75318 int, addrlen)
75319 {
75320 struct socket *sock;
75321 + struct sockaddr *sck;
75322 struct sockaddr_storage address;
75323 int err, fput_needed;
75324
75325 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75326 if (err < 0)
75327 goto out_put;
75328
75329 + sck = (struct sockaddr *)&address;
75330 +
75331 + if (gr_handle_sock_client(sck)) {
75332 + err = -EACCES;
75333 + goto out_put;
75334 + }
75335 +
75336 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
75337 + if (err)
75338 + goto out_put;
75339 +
75340 err =
75341 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
75342 if (err)
75343 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
75344 * checking falls down on this.
75345 */
75346 if (copy_from_user(ctl_buf,
75347 - (void __user __force *)msg_sys->msg_control,
75348 + (void __force_user *)msg_sys->msg_control,
75349 ctl_len))
75350 goto out_freectl;
75351 msg_sys->msg_control = ctl_buf;
75352 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
75353 * kernel msghdr to use the kernel address space)
75354 */
75355
75356 - uaddr = (__force void __user *)msg_sys->msg_name;
75357 + uaddr = (void __force_user *)msg_sys->msg_name;
75358 uaddr_len = COMPAT_NAMELEN(msg);
75359 if (MSG_CMSG_COMPAT & flags) {
75360 err = verify_compat_iovec(msg_sys, iov,
75361 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75362 }
75363
75364 ifr = compat_alloc_user_space(buf_size);
75365 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
75366 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
75367
75368 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
75369 return -EFAULT;
75370 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75371 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
75372
75373 if (copy_in_user(rxnfc, compat_rxnfc,
75374 - (void *)(&rxnfc->fs.m_ext + 1) -
75375 - (void *)rxnfc) ||
75376 + (void __user *)(&rxnfc->fs.m_ext + 1) -
75377 + (void __user *)rxnfc) ||
75378 copy_in_user(&rxnfc->fs.ring_cookie,
75379 &compat_rxnfc->fs.ring_cookie,
75380 - (void *)(&rxnfc->fs.location + 1) -
75381 - (void *)&rxnfc->fs.ring_cookie) ||
75382 + (void __user *)(&rxnfc->fs.location + 1) -
75383 + (void __user *)&rxnfc->fs.ring_cookie) ||
75384 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
75385 sizeof(rxnfc->rule_cnt)))
75386 return -EFAULT;
75387 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75388
75389 if (convert_out) {
75390 if (copy_in_user(compat_rxnfc, rxnfc,
75391 - (const void *)(&rxnfc->fs.m_ext + 1) -
75392 - (const void *)rxnfc) ||
75393 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
75394 + (const void __user *)rxnfc) ||
75395 copy_in_user(&compat_rxnfc->fs.ring_cookie,
75396 &rxnfc->fs.ring_cookie,
75397 - (const void *)(&rxnfc->fs.location + 1) -
75398 - (const void *)&rxnfc->fs.ring_cookie) ||
75399 + (const void __user *)(&rxnfc->fs.location + 1) -
75400 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75401 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75402 sizeof(rxnfc->rule_cnt)))
75403 return -EFAULT;
75404 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
75405 old_fs = get_fs();
75406 set_fs(KERNEL_DS);
75407 err = dev_ioctl(net, cmd,
75408 - (struct ifreq __user __force *) &kifr);
75409 + (struct ifreq __force_user *) &kifr);
75410 set_fs(old_fs);
75411
75412 return err;
75413 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
75414
75415 old_fs = get_fs();
75416 set_fs(KERNEL_DS);
75417 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75418 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75419 set_fs(old_fs);
75420
75421 if (cmd == SIOCGIFMAP && !err) {
75422 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
75423 ret |= __get_user(rtdev, &(ur4->rt_dev));
75424 if (rtdev) {
75425 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75426 - r4.rt_dev = (char __user __force *)devname;
75427 + r4.rt_dev = (char __force_user *)devname;
75428 devname[15] = 0;
75429 } else
75430 r4.rt_dev = NULL;
75431 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
75432 int __user *uoptlen;
75433 int err;
75434
75435 - uoptval = (char __user __force *) optval;
75436 - uoptlen = (int __user __force *) optlen;
75437 + uoptval = (char __force_user *) optval;
75438 + uoptlen = (int __force_user *) optlen;
75439
75440 set_fs(KERNEL_DS);
75441 if (level == SOL_SOCKET)
75442 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75443 char __user *uoptval;
75444 int err;
75445
75446 - uoptval = (char __user __force *) optval;
75447 + uoptval = (char __force_user *) optval;
75448
75449 set_fs(KERNEL_DS);
75450 if (level == SOL_SOCKET)
75451 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75452 index 00a1a2a..6a0138a 100644
75453 --- a/net/sunrpc/sched.c
75454 +++ b/net/sunrpc/sched.c
75455 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75456 #ifdef RPC_DEBUG
75457 static void rpc_task_set_debuginfo(struct rpc_task *task)
75458 {
75459 - static atomic_t rpc_pid;
75460 + static atomic_unchecked_t rpc_pid;
75461
75462 - task->tk_pid = atomic_inc_return(&rpc_pid);
75463 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75464 }
75465 #else
75466 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75467 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75468 index 71bed1c..5dff36d 100644
75469 --- a/net/sunrpc/svcsock.c
75470 +++ b/net/sunrpc/svcsock.c
75471 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75472 int buflen, unsigned int base)
75473 {
75474 size_t save_iovlen;
75475 - void __user *save_iovbase;
75476 + void *save_iovbase;
75477 unsigned int i;
75478 int ret;
75479
75480 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75481 index 09af4fa..77110a9 100644
75482 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75483 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75484 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75485 static unsigned int min_max_inline = 4096;
75486 static unsigned int max_max_inline = 65536;
75487
75488 -atomic_t rdma_stat_recv;
75489 -atomic_t rdma_stat_read;
75490 -atomic_t rdma_stat_write;
75491 -atomic_t rdma_stat_sq_starve;
75492 -atomic_t rdma_stat_rq_starve;
75493 -atomic_t rdma_stat_rq_poll;
75494 -atomic_t rdma_stat_rq_prod;
75495 -atomic_t rdma_stat_sq_poll;
75496 -atomic_t rdma_stat_sq_prod;
75497 +atomic_unchecked_t rdma_stat_recv;
75498 +atomic_unchecked_t rdma_stat_read;
75499 +atomic_unchecked_t rdma_stat_write;
75500 +atomic_unchecked_t rdma_stat_sq_starve;
75501 +atomic_unchecked_t rdma_stat_rq_starve;
75502 +atomic_unchecked_t rdma_stat_rq_poll;
75503 +atomic_unchecked_t rdma_stat_rq_prod;
75504 +atomic_unchecked_t rdma_stat_sq_poll;
75505 +atomic_unchecked_t rdma_stat_sq_prod;
75506
75507 /* Temporary NFS request map and context caches */
75508 struct kmem_cache *svc_rdma_map_cachep;
75509 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75510 len -= *ppos;
75511 if (len > *lenp)
75512 len = *lenp;
75513 - if (len && copy_to_user(buffer, str_buf, len))
75514 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75515 return -EFAULT;
75516 *lenp = len;
75517 *ppos += len;
75518 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75519 {
75520 .procname = "rdma_stat_read",
75521 .data = &rdma_stat_read,
75522 - .maxlen = sizeof(atomic_t),
75523 + .maxlen = sizeof(atomic_unchecked_t),
75524 .mode = 0644,
75525 .proc_handler = read_reset_stat,
75526 },
75527 {
75528 .procname = "rdma_stat_recv",
75529 .data = &rdma_stat_recv,
75530 - .maxlen = sizeof(atomic_t),
75531 + .maxlen = sizeof(atomic_unchecked_t),
75532 .mode = 0644,
75533 .proc_handler = read_reset_stat,
75534 },
75535 {
75536 .procname = "rdma_stat_write",
75537 .data = &rdma_stat_write,
75538 - .maxlen = sizeof(atomic_t),
75539 + .maxlen = sizeof(atomic_unchecked_t),
75540 .mode = 0644,
75541 .proc_handler = read_reset_stat,
75542 },
75543 {
75544 .procname = "rdma_stat_sq_starve",
75545 .data = &rdma_stat_sq_starve,
75546 - .maxlen = sizeof(atomic_t),
75547 + .maxlen = sizeof(atomic_unchecked_t),
75548 .mode = 0644,
75549 .proc_handler = read_reset_stat,
75550 },
75551 {
75552 .procname = "rdma_stat_rq_starve",
75553 .data = &rdma_stat_rq_starve,
75554 - .maxlen = sizeof(atomic_t),
75555 + .maxlen = sizeof(atomic_unchecked_t),
75556 .mode = 0644,
75557 .proc_handler = read_reset_stat,
75558 },
75559 {
75560 .procname = "rdma_stat_rq_poll",
75561 .data = &rdma_stat_rq_poll,
75562 - .maxlen = sizeof(atomic_t),
75563 + .maxlen = sizeof(atomic_unchecked_t),
75564 .mode = 0644,
75565 .proc_handler = read_reset_stat,
75566 },
75567 {
75568 .procname = "rdma_stat_rq_prod",
75569 .data = &rdma_stat_rq_prod,
75570 - .maxlen = sizeof(atomic_t),
75571 + .maxlen = sizeof(atomic_unchecked_t),
75572 .mode = 0644,
75573 .proc_handler = read_reset_stat,
75574 },
75575 {
75576 .procname = "rdma_stat_sq_poll",
75577 .data = &rdma_stat_sq_poll,
75578 - .maxlen = sizeof(atomic_t),
75579 + .maxlen = sizeof(atomic_unchecked_t),
75580 .mode = 0644,
75581 .proc_handler = read_reset_stat,
75582 },
75583 {
75584 .procname = "rdma_stat_sq_prod",
75585 .data = &rdma_stat_sq_prod,
75586 - .maxlen = sizeof(atomic_t),
75587 + .maxlen = sizeof(atomic_unchecked_t),
75588 .mode = 0644,
75589 .proc_handler = read_reset_stat,
75590 },
75591 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75592 index df67211..c354b13 100644
75593 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75594 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75595 @@ -499,7 +499,7 @@ next_sge:
75596 svc_rdma_put_context(ctxt, 0);
75597 goto out;
75598 }
75599 - atomic_inc(&rdma_stat_read);
75600 + atomic_inc_unchecked(&rdma_stat_read);
75601
75602 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75603 chl_map->ch[ch_no].count -= read_wr.num_sge;
75604 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75605 dto_q);
75606 list_del_init(&ctxt->dto_q);
75607 } else {
75608 - atomic_inc(&rdma_stat_rq_starve);
75609 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75610 clear_bit(XPT_DATA, &xprt->xpt_flags);
75611 ctxt = NULL;
75612 }
75613 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75614 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75615 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75616 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75617 - atomic_inc(&rdma_stat_recv);
75618 + atomic_inc_unchecked(&rdma_stat_recv);
75619
75620 /* Build up the XDR from the receive buffers. */
75621 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75622 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75623 index 249a835..fb2794b 100644
75624 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75625 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75626 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75627 write_wr.wr.rdma.remote_addr = to;
75628
75629 /* Post It */
75630 - atomic_inc(&rdma_stat_write);
75631 + atomic_inc_unchecked(&rdma_stat_write);
75632 if (svc_rdma_send(xprt, &write_wr))
75633 goto err;
75634 return 0;
75635 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75636 index ba1296d..0fec1a5 100644
75637 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75638 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75639 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75640 return;
75641
75642 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75643 - atomic_inc(&rdma_stat_rq_poll);
75644 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75645
75646 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75647 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75648 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75649 }
75650
75651 if (ctxt)
75652 - atomic_inc(&rdma_stat_rq_prod);
75653 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75654
75655 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75656 /*
75657 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75658 return;
75659
75660 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75661 - atomic_inc(&rdma_stat_sq_poll);
75662 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75663 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75664 if (wc.status != IB_WC_SUCCESS)
75665 /* Close the transport */
75666 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75667 }
75668
75669 if (ctxt)
75670 - atomic_inc(&rdma_stat_sq_prod);
75671 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75672 }
75673
75674 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75675 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75676 spin_lock_bh(&xprt->sc_lock);
75677 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75678 spin_unlock_bh(&xprt->sc_lock);
75679 - atomic_inc(&rdma_stat_sq_starve);
75680 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75681
75682 /* See if we can opportunistically reap SQ WR to make room */
75683 sq_cq_reap(xprt);
75684 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75685 index e758139..d29ea47 100644
75686 --- a/net/sysctl_net.c
75687 +++ b/net/sysctl_net.c
75688 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75689 struct ctl_table *table)
75690 {
75691 /* Allow network administrator to have same access as root. */
75692 - if (capable(CAP_NET_ADMIN)) {
75693 + if (capable_nolog(CAP_NET_ADMIN)) {
75694 int mode = (table->mode >> 6) & 7;
75695 return (mode << 6) | (mode << 3) | mode;
75696 }
75697 diff --git a/net/tipc/link.c b/net/tipc/link.c
75698 index ae98a72..7bb6056 100644
75699 --- a/net/tipc/link.c
75700 +++ b/net/tipc/link.c
75701 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75702 struct tipc_msg fragm_hdr;
75703 struct sk_buff *buf, *buf_chain, *prev;
75704 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75705 - const unchar *sect_crs;
75706 + const unchar __user *sect_crs;
75707 int curr_sect;
75708 u32 fragm_no;
75709
75710 @@ -1247,7 +1247,7 @@ again:
75711
75712 if (!sect_rest) {
75713 sect_rest = msg_sect[++curr_sect].iov_len;
75714 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75715 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75716 }
75717
75718 if (sect_rest < fragm_rest)
75719 @@ -1266,7 +1266,7 @@ error:
75720 }
75721 } else
75722 skb_copy_to_linear_data_offset(buf, fragm_crs,
75723 - sect_crs, sz);
75724 + (const void __force_kernel *)sect_crs, sz);
75725 sect_crs += sz;
75726 sect_rest -= sz;
75727 fragm_crs += sz;
75728 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75729 index 83d5096..dcba497 100644
75730 --- a/net/tipc/msg.c
75731 +++ b/net/tipc/msg.c
75732 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75733 msg_sect[cnt].iov_len);
75734 else
75735 skb_copy_to_linear_data_offset(*buf, pos,
75736 - msg_sect[cnt].iov_base,
75737 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75738 msg_sect[cnt].iov_len);
75739 pos += msg_sect[cnt].iov_len;
75740 }
75741 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75742 index 1983717..4d6102c 100644
75743 --- a/net/tipc/subscr.c
75744 +++ b/net/tipc/subscr.c
75745 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75746 {
75747 struct iovec msg_sect;
75748
75749 - msg_sect.iov_base = (void *)&sub->evt;
75750 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75751 msg_sect.iov_len = sizeof(struct tipc_event);
75752
75753 sub->evt.event = htohl(event, sub->swap);
75754 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75755 index d99678a..3514a21 100644
75756 --- a/net/unix/af_unix.c
75757 +++ b/net/unix/af_unix.c
75758 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75759 err = -ECONNREFUSED;
75760 if (!S_ISSOCK(inode->i_mode))
75761 goto put_fail;
75762 +
75763 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75764 + err = -EACCES;
75765 + goto put_fail;
75766 + }
75767 +
75768 u = unix_find_socket_byinode(inode);
75769 if (!u)
75770 goto put_fail;
75771 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75772 if (u) {
75773 struct dentry *dentry;
75774 dentry = unix_sk(u)->dentry;
75775 +
75776 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75777 + err = -EPERM;
75778 + sock_put(u);
75779 + goto fail;
75780 + }
75781 +
75782 if (dentry)
75783 touch_atime(unix_sk(u)->mnt, dentry);
75784 } else
75785 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75786 err = security_path_mknod(&path, dentry, mode, 0);
75787 if (err)
75788 goto out_mknod_drop_write;
75789 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75790 + err = -EACCES;
75791 + goto out_mknod_drop_write;
75792 + }
75793 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75794 out_mknod_drop_write:
75795 mnt_drop_write(path.mnt);
75796 if (err)
75797 goto out_mknod_dput;
75798 +
75799 + gr_handle_create(dentry, path.mnt);
75800 +
75801 mutex_unlock(&path.dentry->d_inode->i_mutex);
75802 dput(path.dentry);
75803 path.dentry = dentry;
75804 diff --git a/net/wireless/core.h b/net/wireless/core.h
75805 index b9ec306..b4a563e 100644
75806 --- a/net/wireless/core.h
75807 +++ b/net/wireless/core.h
75808 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75809 struct mutex mtx;
75810
75811 /* rfkill support */
75812 - struct rfkill_ops rfkill_ops;
75813 + rfkill_ops_no_const rfkill_ops;
75814 struct rfkill *rfkill;
75815 struct work_struct rfkill_sync;
75816
75817 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75818 index 0af7f54..c916d2f 100644
75819 --- a/net/wireless/wext-core.c
75820 +++ b/net/wireless/wext-core.c
75821 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75822 */
75823
75824 /* Support for very large requests */
75825 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75826 - (user_length > descr->max_tokens)) {
75827 + if (user_length > descr->max_tokens) {
75828 /* Allow userspace to GET more than max so
75829 * we can support any size GET requests.
75830 * There is still a limit : -ENOMEM.
75831 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75832 }
75833 }
75834
75835 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75836 - /*
75837 - * If this is a GET, but not NOMAX, it means that the extra
75838 - * data is not bounded by userspace, but by max_tokens. Thus
75839 - * set the length to max_tokens. This matches the extra data
75840 - * allocation.
75841 - * The driver should fill it with the number of tokens it
75842 - * provided, and it may check iwp->length rather than having
75843 - * knowledge of max_tokens. If the driver doesn't change the
75844 - * iwp->length, this ioctl just copies back max_token tokens
75845 - * filled with zeroes. Hopefully the driver isn't claiming
75846 - * them to be valid data.
75847 - */
75848 - iwp->length = descr->max_tokens;
75849 - }
75850 -
75851 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75852
75853 iwp->length += essid_compat;
75854 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75855 index 9049a5c..cfa6f5c 100644
75856 --- a/net/xfrm/xfrm_policy.c
75857 +++ b/net/xfrm/xfrm_policy.c
75858 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75859 {
75860 policy->walk.dead = 1;
75861
75862 - atomic_inc(&policy->genid);
75863 + atomic_inc_unchecked(&policy->genid);
75864
75865 if (del_timer(&policy->timer))
75866 xfrm_pol_put(policy);
75867 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75868 hlist_add_head(&policy->bydst, chain);
75869 xfrm_pol_hold(policy);
75870 net->xfrm.policy_count[dir]++;
75871 - atomic_inc(&flow_cache_genid);
75872 + atomic_inc_unchecked(&flow_cache_genid);
75873 if (delpol)
75874 __xfrm_policy_unlink(delpol, dir);
75875 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75876 @@ -1530,7 +1530,7 @@ free_dst:
75877 goto out;
75878 }
75879
75880 -static int inline
75881 +static inline int
75882 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75883 {
75884 if (!*target) {
75885 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75886 return 0;
75887 }
75888
75889 -static int inline
75890 +static inline int
75891 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75892 {
75893 #ifdef CONFIG_XFRM_SUB_POLICY
75894 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75895 #endif
75896 }
75897
75898 -static int inline
75899 +static inline int
75900 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75901 {
75902 #ifdef CONFIG_XFRM_SUB_POLICY
75903 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75904
75905 xdst->num_pols = num_pols;
75906 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75907 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75908 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75909
75910 return xdst;
75911 }
75912 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75913 if (xdst->xfrm_genid != dst->xfrm->genid)
75914 return 0;
75915 if (xdst->num_pols > 0 &&
75916 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75917 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75918 return 0;
75919
75920 mtu = dst_mtu(dst->child);
75921 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75922 sizeof(pol->xfrm_vec[i].saddr));
75923 pol->xfrm_vec[i].encap_family = mp->new_family;
75924 /* flush bundles */
75925 - atomic_inc(&pol->genid);
75926 + atomic_inc_unchecked(&pol->genid);
75927 }
75928 }
75929
75930 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75931 index d2b366c..51ff91e 100644
75932 --- a/scripts/Makefile.build
75933 +++ b/scripts/Makefile.build
75934 @@ -109,7 +109,7 @@ endif
75935 endif
75936
75937 # Do not include host rules unless needed
75938 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75939 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75940 include scripts/Makefile.host
75941 endif
75942
75943 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75944 index 686cb0d..9d653bf 100644
75945 --- a/scripts/Makefile.clean
75946 +++ b/scripts/Makefile.clean
75947 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75948 __clean-files := $(extra-y) $(always) \
75949 $(targets) $(clean-files) \
75950 $(host-progs) \
75951 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75952 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75953 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75954
75955 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75956
75957 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75958 index 1ac414f..a1c1451 100644
75959 --- a/scripts/Makefile.host
75960 +++ b/scripts/Makefile.host
75961 @@ -31,6 +31,7 @@
75962 # Note: Shared libraries consisting of C++ files are not supported
75963
75964 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75965 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75966
75967 # C code
75968 # Executables compiled from a single .c file
75969 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
75970 # Shared libaries (only .c supported)
75971 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75972 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75973 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75974 # Remove .so files from "xxx-objs"
75975 host-cobjs := $(filter-out %.so,$(host-cobjs))
75976
75977 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
75978 index cb1f50c..cef2a7c 100644
75979 --- a/scripts/basic/fixdep.c
75980 +++ b/scripts/basic/fixdep.c
75981 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
75982 /*
75983 * Lookup a value in the configuration string.
75984 */
75985 -static int is_defined_config(const char *name, int len, unsigned int hash)
75986 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75987 {
75988 struct item *aux;
75989
75990 @@ -211,10 +211,10 @@ static void clear_config(void)
75991 /*
75992 * Record the use of a CONFIG_* word.
75993 */
75994 -static void use_config(const char *m, int slen)
75995 +static void use_config(const char *m, unsigned int slen)
75996 {
75997 unsigned int hash = strhash(m, slen);
75998 - int c, i;
75999 + unsigned int c, i;
76000
76001 if (is_defined_config(m, slen, hash))
76002 return;
76003 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
76004
76005 static void parse_config_file(const char *map, size_t len)
76006 {
76007 - const int *end = (const int *) (map + len);
76008 + const unsigned int *end = (const unsigned int *) (map + len);
76009 /* start at +1, so that p can never be < map */
76010 - const int *m = (const int *) map + 1;
76011 + const unsigned int *m = (const unsigned int *) map + 1;
76012 const char *p, *q;
76013
76014 for (; m < end; m++) {
76015 @@ -406,7 +406,7 @@ static void print_deps(void)
76016 static void traps(void)
76017 {
76018 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
76019 - int *p = (int *)test;
76020 + unsigned int *p = (unsigned int *)test;
76021
76022 if (*p != INT_CONF) {
76023 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
76024 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
76025 new file mode 100644
76026 index 0000000..8729101
76027 --- /dev/null
76028 +++ b/scripts/gcc-plugin.sh
76029 @@ -0,0 +1,2 @@
76030 +#!/bin/sh
76031 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
76032 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
76033 index f936d1f..a66d95f 100644
76034 --- a/scripts/mod/file2alias.c
76035 +++ b/scripts/mod/file2alias.c
76036 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
76037 unsigned long size, unsigned long id_size,
76038 void *symval)
76039 {
76040 - int i;
76041 + unsigned int i;
76042
76043 if (size % id_size || size < id_size) {
76044 if (cross_build != 0)
76045 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
76046 /* USB is special because the bcdDevice can be matched against a numeric range */
76047 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
76048 static void do_usb_entry(struct usb_device_id *id,
76049 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
76050 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
76051 unsigned char range_lo, unsigned char range_hi,
76052 unsigned char max, struct module *mod)
76053 {
76054 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
76055 {
76056 unsigned int devlo, devhi;
76057 unsigned char chi, clo, max;
76058 - int ndigits;
76059 + unsigned int ndigits;
76060
76061 id->match_flags = TO_NATIVE(id->match_flags);
76062 id->idVendor = TO_NATIVE(id->idVendor);
76063 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
76064 for (i = 0; i < count; i++) {
76065 const char *id = (char *)devs[i].id;
76066 char acpi_id[sizeof(devs[0].id)];
76067 - int j;
76068 + unsigned int j;
76069
76070 buf_printf(&mod->dev_table_buf,
76071 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76072 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76073
76074 for (j = 0; j < PNP_MAX_DEVICES; j++) {
76075 const char *id = (char *)card->devs[j].id;
76076 - int i2, j2;
76077 + unsigned int i2, j2;
76078 int dup = 0;
76079
76080 if (!id[0])
76081 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76082 /* add an individual alias for every device entry */
76083 if (!dup) {
76084 char acpi_id[sizeof(card->devs[0].id)];
76085 - int k;
76086 + unsigned int k;
76087
76088 buf_printf(&mod->dev_table_buf,
76089 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76090 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
76091 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
76092 char *alias)
76093 {
76094 - int i, j;
76095 + unsigned int i, j;
76096
76097 sprintf(alias, "dmi*");
76098
76099 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
76100 index 2bd594e..d43245e 100644
76101 --- a/scripts/mod/modpost.c
76102 +++ b/scripts/mod/modpost.c
76103 @@ -919,6 +919,7 @@ enum mismatch {
76104 ANY_INIT_TO_ANY_EXIT,
76105 ANY_EXIT_TO_ANY_INIT,
76106 EXPORT_TO_INIT_EXIT,
76107 + DATA_TO_TEXT
76108 };
76109
76110 struct sectioncheck {
76111 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
76112 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
76113 .mismatch = EXPORT_TO_INIT_EXIT,
76114 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
76115 +},
76116 +/* Do not reference code from writable data */
76117 +{
76118 + .fromsec = { DATA_SECTIONS, NULL },
76119 + .tosec = { TEXT_SECTIONS, NULL },
76120 + .mismatch = DATA_TO_TEXT
76121 }
76122 };
76123
76124 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
76125 continue;
76126 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
76127 continue;
76128 - if (sym->st_value == addr)
76129 - return sym;
76130 /* Find a symbol nearby - addr are maybe negative */
76131 d = sym->st_value - addr;
76132 + if (d == 0)
76133 + return sym;
76134 if (d < 0)
76135 d = addr - sym->st_value;
76136 if (d < distance) {
76137 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
76138 tosym, prl_to, prl_to, tosym);
76139 free(prl_to);
76140 break;
76141 + case DATA_TO_TEXT:
76142 +/*
76143 + fprintf(stderr,
76144 + "The variable %s references\n"
76145 + "the %s %s%s%s\n",
76146 + fromsym, to, sec2annotation(tosec), tosym, to_p);
76147 +*/
76148 + break;
76149 }
76150 fprintf(stderr, "\n");
76151 }
76152 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
76153 static void check_sec_ref(struct module *mod, const char *modname,
76154 struct elf_info *elf)
76155 {
76156 - int i;
76157 + unsigned int i;
76158 Elf_Shdr *sechdrs = elf->sechdrs;
76159
76160 /* Walk through all sections */
76161 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
76162 va_end(ap);
76163 }
76164
76165 -void buf_write(struct buffer *buf, const char *s, int len)
76166 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
76167 {
76168 if (buf->size - buf->pos < len) {
76169 buf->size += len + SZ;
76170 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
76171 if (fstat(fileno(file), &st) < 0)
76172 goto close_write;
76173
76174 - if (st.st_size != b->pos)
76175 + if (st.st_size != (off_t)b->pos)
76176 goto close_write;
76177
76178 tmp = NOFAIL(malloc(b->pos));
76179 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
76180 index 2031119..b5433af 100644
76181 --- a/scripts/mod/modpost.h
76182 +++ b/scripts/mod/modpost.h
76183 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
76184
76185 struct buffer {
76186 char *p;
76187 - int pos;
76188 - int size;
76189 + unsigned int pos;
76190 + unsigned int size;
76191 };
76192
76193 void __attribute__((format(printf, 2, 3)))
76194 buf_printf(struct buffer *buf, const char *fmt, ...);
76195
76196 void
76197 -buf_write(struct buffer *buf, const char *s, int len);
76198 +buf_write(struct buffer *buf, const char *s, unsigned int len);
76199
76200 struct module {
76201 struct module *next;
76202 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
76203 index 9dfcd6d..099068e 100644
76204 --- a/scripts/mod/sumversion.c
76205 +++ b/scripts/mod/sumversion.c
76206 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
76207 goto out;
76208 }
76209
76210 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
76211 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
76212 warn("writing sum in %s failed: %s\n",
76213 filename, strerror(errno));
76214 goto out;
76215 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
76216 index 5c11312..72742b5 100644
76217 --- a/scripts/pnmtologo.c
76218 +++ b/scripts/pnmtologo.c
76219 @@ -237,14 +237,14 @@ static void write_header(void)
76220 fprintf(out, " * Linux logo %s\n", logoname);
76221 fputs(" */\n\n", out);
76222 fputs("#include <linux/linux_logo.h>\n\n", out);
76223 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
76224 + fprintf(out, "static unsigned char %s_data[] = {\n",
76225 logoname);
76226 }
76227
76228 static void write_footer(void)
76229 {
76230 fputs("\n};\n\n", out);
76231 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
76232 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
76233 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
76234 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
76235 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
76236 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
76237 fputs("\n};\n\n", out);
76238
76239 /* write logo clut */
76240 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
76241 + fprintf(out, "static unsigned char %s_clut[] = {\n",
76242 logoname);
76243 write_hex_cnt = 0;
76244 for (i = 0; i < logo_clutsize; i++) {
76245 diff --git a/security/Kconfig b/security/Kconfig
76246 index 51bd5a0..eeabc9f 100644
76247 --- a/security/Kconfig
76248 +++ b/security/Kconfig
76249 @@ -4,6 +4,627 @@
76250
76251 menu "Security options"
76252
76253 +source grsecurity/Kconfig
76254 +
76255 +menu "PaX"
76256 +
76257 + config ARCH_TRACK_EXEC_LIMIT
76258 + bool
76259 +
76260 + config PAX_KERNEXEC_PLUGIN
76261 + bool
76262 +
76263 + config PAX_PER_CPU_PGD
76264 + bool
76265 +
76266 + config TASK_SIZE_MAX_SHIFT
76267 + int
76268 + depends on X86_64
76269 + default 47 if !PAX_PER_CPU_PGD
76270 + default 42 if PAX_PER_CPU_PGD
76271 +
76272 + config PAX_ENABLE_PAE
76273 + bool
76274 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
76275 +
76276 +config PAX
76277 + bool "Enable various PaX features"
76278 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
76279 + help
76280 + This allows you to enable various PaX features. PaX adds
76281 + intrusion prevention mechanisms to the kernel that reduce
76282 + the risks posed by exploitable memory corruption bugs.
76283 +
76284 +menu "PaX Control"
76285 + depends on PAX
76286 +
76287 +config PAX_SOFTMODE
76288 + bool 'Support soft mode'
76289 + help
76290 + Enabling this option will allow you to run PaX in soft mode, that
76291 + is, PaX features will not be enforced by default, only on executables
76292 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
76293 + support as they are the only way to mark executables for soft mode use.
76294 +
76295 + Soft mode can be activated by using the "pax_softmode=1" kernel command
76296 + line option on boot. Furthermore you can control various PaX features
76297 + at runtime via the entries in /proc/sys/kernel/pax.
76298 +
76299 +config PAX_EI_PAX
76300 + bool 'Use legacy ELF header marking'
76301 + help
76302 + Enabling this option will allow you to control PaX features on
76303 + a per executable basis via the 'chpax' utility available at
76304 + http://pax.grsecurity.net/. The control flags will be read from
76305 + an otherwise reserved part of the ELF header. This marking has
76306 + numerous drawbacks (no support for soft-mode, toolchain does not
76307 + know about the non-standard use of the ELF header) therefore it
76308 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
76309 + support.
76310 +
76311 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76312 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
76313 + option otherwise they will not get any protection.
76314 +
76315 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
76316 + support as well, they will override the legacy EI_PAX marks.
76317 +
76318 +config PAX_PT_PAX_FLAGS
76319 + bool 'Use ELF program header marking'
76320 + help
76321 + Enabling this option will allow you to control PaX features on
76322 + a per executable basis via the 'paxctl' utility available at
76323 + http://pax.grsecurity.net/. The control flags will be read from
76324 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
76325 + has the benefits of supporting both soft mode and being fully
76326 + integrated into the toolchain (the binutils patch is available
76327 + from http://pax.grsecurity.net).
76328 +
76329 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76330 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76331 + support otherwise they will not get any protection.
76332 +
76333 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76334 + must make sure that the marks are the same if a binary has both marks.
76335 +
76336 + Note that if you enable the legacy EI_PAX marking support as well,
76337 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
76338 +
76339 +config PAX_XATTR_PAX_FLAGS
76340 + bool 'Use filesystem extended attributes marking'
76341 + depends on EXPERT
76342 + select CIFS_XATTR if CIFS
76343 + select EXT2_FS_XATTR if EXT2_FS
76344 + select EXT3_FS_XATTR if EXT3_FS
76345 + select EXT4_FS_XATTR if EXT4_FS
76346 + select JFFS2_FS_XATTR if JFFS2_FS
76347 + select REISERFS_FS_XATTR if REISERFS_FS
76348 + select SQUASHFS_XATTR if SQUASHFS
76349 + select TMPFS_XATTR if TMPFS
76350 + select UBIFS_FS_XATTR if UBIFS_FS
76351 + help
76352 + Enabling this option will allow you to control PaX features on
76353 + a per executable basis via the 'setfattr' utility. The control
76354 + flags will be read from the user.pax.flags extended attribute of
76355 + the file. This marking has the benefit of supporting binary-only
76356 + applications that self-check themselves (e.g., skype) and would
76357 + not tolerate chpax/paxctl changes. The main drawback is that
76358 + extended attributes are not supported by some filesystems (e.g.,
76359 + isofs, udf, vfat) so copying files through such filesystems will
76360 + lose the extended attributes and these PaX markings.
76361 +
76362 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76363 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76364 + support otherwise they will not get any protection.
76365 +
76366 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76367 + must make sure that the marks are the same if a binary has both marks.
76368 +
76369 + Note that if you enable the legacy EI_PAX marking support as well,
76370 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
76371 +
76372 +choice
76373 + prompt 'MAC system integration'
76374 + default PAX_HAVE_ACL_FLAGS
76375 + help
76376 + Mandatory Access Control systems have the option of controlling
76377 + PaX flags on a per executable basis, choose the method supported
76378 + by your particular system.
76379 +
76380 + - "none": if your MAC system does not interact with PaX,
76381 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
76382 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
76383 +
76384 + NOTE: this option is for developers/integrators only.
76385 +
76386 + config PAX_NO_ACL_FLAGS
76387 + bool 'none'
76388 +
76389 + config PAX_HAVE_ACL_FLAGS
76390 + bool 'direct'
76391 +
76392 + config PAX_HOOK_ACL_FLAGS
76393 + bool 'hook'
76394 +endchoice
76395 +
76396 +endmenu
76397 +
76398 +menu "Non-executable pages"
76399 + depends on PAX
76400 +
76401 +config PAX_NOEXEC
76402 + bool "Enforce non-executable pages"
76403 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
76404 + help
76405 + By design some architectures do not allow for protecting memory
76406 + pages against execution or even if they do, Linux does not make
76407 + use of this feature. In practice this means that if a page is
76408 + readable (such as the stack or heap) it is also executable.
76409 +
76410 + There is a well known exploit technique that makes use of this
76411 + fact and a common programming mistake where an attacker can
76412 + introduce code of his choice somewhere in the attacked program's
76413 + memory (typically the stack or the heap) and then execute it.
76414 +
76415 + If the attacked program was running with different (typically
76416 + higher) privileges than that of the attacker, then he can elevate
76417 + his own privilege level (e.g. get a root shell, write to files for
76418 + which he does not have write access to, etc).
76419 +
76420 + Enabling this option will let you choose from various features
76421 + that prevent the injection and execution of 'foreign' code in
76422 + a program.
76423 +
76424 + This will also break programs that rely on the old behaviour and
76425 + expect that dynamically allocated memory via the malloc() family
76426 + of functions is executable (which it is not). Notable examples
76427 + are the XFree86 4.x server, the java runtime and wine.
76428 +
76429 +config PAX_PAGEEXEC
76430 + bool "Paging based non-executable pages"
76431 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76432 + select S390_SWITCH_AMODE if S390
76433 + select S390_EXEC_PROTECT if S390
76434 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76435 + help
76436 + This implementation is based on the paging feature of the CPU.
76437 + On i386 without hardware non-executable bit support there is a
76438 + variable but usually low performance impact, however on Intel's
76439 + P4 core based CPUs it is very high so you should not enable this
76440 + for kernels meant to be used on such CPUs.
76441 +
76442 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76443 + with hardware non-executable bit support there is no performance
76444 + impact, on ppc the impact is negligible.
76445 +
76446 + Note that several architectures require various emulations due to
76447 + badly designed userland ABIs, this will cause a performance impact
76448 + but will disappear as soon as userland is fixed. For example, ppc
76449 + userland MUST have been built with secure-plt by a recent toolchain.
76450 +
76451 +config PAX_SEGMEXEC
76452 + bool "Segmentation based non-executable pages"
76453 + depends on PAX_NOEXEC && X86_32
76454 + help
76455 + This implementation is based on the segmentation feature of the
76456 + CPU and has a very small performance impact, however applications
76457 + will be limited to a 1.5 GB address space instead of the normal
76458 + 3 GB.
76459 +
76460 +config PAX_EMUTRAMP
76461 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76462 + default y if PARISC
76463 + help
76464 + There are some programs and libraries that for one reason or
76465 + another attempt to execute special small code snippets from
76466 + non-executable memory pages. Most notable examples are the
76467 + signal handler return code generated by the kernel itself and
76468 + the GCC trampolines.
76469 +
76470 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76471 + such programs will no longer work under your kernel.
76472 +
76473 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76474 + utilities to enable trampoline emulation for the affected programs
76475 + yet still have the protection provided by the non-executable pages.
76476 +
76477 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76478 + your system will not even boot.
76479 +
76480 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76481 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76482 + for the affected files.
76483 +
76484 + NOTE: enabling this feature *may* open up a loophole in the
76485 + protection provided by non-executable pages that an attacker
76486 + could abuse. Therefore the best solution is to not have any
76487 + files on your system that would require this option. This can
76488 + be achieved by not using libc5 (which relies on the kernel
76489 + signal handler return code) and not using or rewriting programs
76490 + that make use of the nested function implementation of GCC.
76491 + Skilled users can just fix GCC itself so that it implements
76492 + nested function calls in a way that does not interfere with PaX.
76493 +
76494 +config PAX_EMUSIGRT
76495 + bool "Automatically emulate sigreturn trampolines"
76496 + depends on PAX_EMUTRAMP && PARISC
76497 + default y
76498 + help
76499 + Enabling this option will have the kernel automatically detect
76500 + and emulate signal return trampolines executing on the stack
76501 + that would otherwise lead to task termination.
76502 +
76503 + This solution is intended as a temporary one for users with
76504 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76505 + Modula-3 runtime, etc) or executables linked to such, basically
76506 + everything that does not specify its own SA_RESTORER function in
76507 + normal executable memory like glibc 2.1+ does.
76508 +
76509 + On parisc you MUST enable this option, otherwise your system will
76510 + not even boot.
76511 +
76512 + NOTE: this feature cannot be disabled on a per executable basis
76513 + and since it *does* open up a loophole in the protection provided
76514 + by non-executable pages, the best solution is to not have any
76515 + files on your system that would require this option.
76516 +
76517 +config PAX_MPROTECT
76518 + bool "Restrict mprotect()"
76519 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76520 + help
76521 + Enabling this option will prevent programs from
76522 + - changing the executable status of memory pages that were
76523 + not originally created as executable,
76524 + - making read-only executable pages writable again,
76525 + - creating executable pages from anonymous memory,
76526 + - making read-only-after-relocations (RELRO) data pages writable again.
76527 +
76528 + You should say Y here to complete the protection provided by
76529 + the enforcement of non-executable pages.
76530 +
76531 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76532 + this feature on a per file basis.
76533 +
76534 +config PAX_MPROTECT_COMPAT
76535 + bool "Use legacy/compat protection demoting (read help)"
76536 + depends on PAX_MPROTECT
76537 + default n
76538 + help
76539 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76540 + by sending the proper error code to the application. For some broken
76541 + userland, this can cause problems with Python or other applications. The
76542 + current implementation however allows for applications like clamav to
76543 + detect if JIT compilation/execution is allowed and to fall back gracefully
76544 + to an interpreter-based mode if it does not. While we encourage everyone
76545 + to use the current implementation as-is and push upstream to fix broken
76546 + userland (note that the RWX logging option can assist with this), in some
76547 + environments this may not be possible. Having to disable MPROTECT
76548 + completely on certain binaries reduces the security benefit of PaX,
76549 + so this option is provided for those environments to revert to the old
76550 + behavior.
76551 +
76552 +config PAX_ELFRELOCS
76553 + bool "Allow ELF text relocations (read help)"
76554 + depends on PAX_MPROTECT
76555 + default n
76556 + help
76557 + Non-executable pages and mprotect() restrictions are effective
76558 + in preventing the introduction of new executable code into an
76559 + attacked task's address space. There remain only two venues
76560 + for this kind of attack: if the attacker can execute already
76561 + existing code in the attacked task then he can either have it
76562 + create and mmap() a file containing his code or have it mmap()
76563 + an already existing ELF library that does not have position
76564 + independent code in it and use mprotect() on it to make it
76565 + writable and copy his code there. While protecting against
76566 + the former approach is beyond PaX, the latter can be prevented
76567 + by having only PIC ELF libraries on one's system (which do not
76568 + need to relocate their code). If you are sure this is your case,
76569 + as is the case with all modern Linux distributions, then leave
76570 + this option disabled. You should say 'n' here.
76571 +
76572 +config PAX_ETEXECRELOCS
76573 + bool "Allow ELF ET_EXEC text relocations"
76574 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76575 + select PAX_ELFRELOCS
76576 + default y
76577 + help
76578 + On some architectures there are incorrectly created applications
76579 + that require text relocations and would not work without enabling
76580 + this option. If you are an alpha, ia64 or parisc user, you should
76581 + enable this option and disable it once you have made sure that
76582 + none of your applications need it.
76583 +
76584 +config PAX_EMUPLT
76585 + bool "Automatically emulate ELF PLT"
76586 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76587 + default y
76588 + help
76589 + Enabling this option will have the kernel automatically detect
76590 + and emulate the Procedure Linkage Table entries in ELF files.
76591 + On some architectures such entries are in writable memory, and
76592 + become non-executable leading to task termination. Therefore
76593 + it is mandatory that you enable this option on alpha, parisc,
76594 + sparc and sparc64, otherwise your system would not even boot.
76595 +
76596 + NOTE: this feature *does* open up a loophole in the protection
76597 + provided by the non-executable pages, therefore the proper
76598 + solution is to modify the toolchain to produce a PLT that does
76599 + not need to be writable.
76600 +
76601 +config PAX_DLRESOLVE
76602 + bool 'Emulate old glibc resolver stub'
76603 + depends on PAX_EMUPLT && SPARC
76604 + default n
76605 + help
76606 + This option is needed if userland has an old glibc (before 2.4)
76607 + that puts a 'save' instruction into the runtime generated resolver
76608 + stub that needs special emulation.
76609 +
76610 +config PAX_KERNEXEC
76611 + bool "Enforce non-executable kernel pages"
76612 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76613 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76614 + select PAX_KERNEXEC_PLUGIN if X86_64
76615 + help
76616 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76617 + that is, enabling this option will make it harder to inject
76618 + and execute 'foreign' code in kernel memory itself.
76619 +
76620 + Note that on x86_64 kernels there is a known regression when
76621 + this feature and KVM/VMX are both enabled in the host kernel.
76622 +
76623 +choice
76624 + prompt "Return Address Instrumentation Method"
76625 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76626 + depends on PAX_KERNEXEC_PLUGIN
76627 + help
76628 + Select the method used to instrument function pointer dereferences.
76629 + Note that binary modules cannot be instrumented by this approach.
76630 +
76631 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76632 + bool "bts"
76633 + help
76634 + This method is compatible with binary only modules but has
76635 + a higher runtime overhead.
76636 +
76637 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76638 + bool "or"
76639 + depends on !PARAVIRT
76640 + help
76641 + This method is incompatible with binary only modules but has
76642 + a lower runtime overhead.
76643 +endchoice
76644 +
76645 +config PAX_KERNEXEC_PLUGIN_METHOD
76646 + string
76647 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76648 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76649 + default ""
76650 +
76651 +config PAX_KERNEXEC_MODULE_TEXT
76652 + int "Minimum amount of memory reserved for module code"
76653 + default "4"
76654 + depends on PAX_KERNEXEC && X86_32 && MODULES
76655 + help
76656 + Due to implementation details the kernel must reserve a fixed
76657 + amount of memory for module code at compile time that cannot be
76658 + changed at runtime. Here you can specify the minimum amount
76659 + in MB that will be reserved. Due to the same implementation
76660 + details this size will always be rounded up to the next 2/4 MB
76661 + boundary (depends on PAE) so the actually available memory for
76662 + module code will usually be more than this minimum.
76663 +
76664 + The default 4 MB should be enough for most users but if you have
76665 + an excessive number of modules (e.g., most distribution configs
76666 + compile many drivers as modules) or use huge modules such as
76667 + nvidia's kernel driver, you will need to adjust this amount.
76668 + A good rule of thumb is to look at your currently loaded kernel
76669 + modules and add up their sizes.
76670 +
76671 +endmenu
76672 +
76673 +menu "Address Space Layout Randomization"
76674 + depends on PAX
76675 +
76676 +config PAX_ASLR
76677 + bool "Address Space Layout Randomization"
76678 + help
76679 + Many if not most exploit techniques rely on the knowledge of
76680 + certain addresses in the attacked program. The following options
76681 + will allow the kernel to apply a certain amount of randomization
76682 + to specific parts of the program thereby forcing an attacker to
76683 + guess them in most cases. Any failed guess will most likely crash
76684 + the attacked program which allows the kernel to detect such attempts
76685 + and react on them. PaX itself provides no reaction mechanisms,
76686 + instead it is strongly encouraged that you make use of Nergal's
76687 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76688 + (http://www.grsecurity.net/) built-in crash detection features or
76689 + develop one yourself.
76690 +
76691 + By saying Y here you can choose to randomize the following areas:
76692 + - top of the task's kernel stack
76693 + - top of the task's userland stack
76694 + - base address for mmap() requests that do not specify one
76695 + (this includes all libraries)
76696 + - base address of the main executable
76697 +
76698 + It is strongly recommended to say Y here as address space layout
76699 + randomization has negligible impact on performance yet it provides
76700 + a very effective protection.
76701 +
76702 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76703 + this feature on a per file basis.
76704 +
76705 +config PAX_RANDKSTACK
76706 + bool "Randomize kernel stack base"
76707 + depends on X86_TSC && X86
76708 + help
76709 + By saying Y here the kernel will randomize every task's kernel
76710 + stack on every system call. This will not only force an attacker
76711 + to guess it but also prevent him from making use of possible
76712 + leaked information about it.
76713 +
76714 + Since the kernel stack is a rather scarce resource, randomization
76715 + may cause unexpected stack overflows, therefore you should very
76716 + carefully test your system. Note that once enabled in the kernel
76717 + configuration, this feature cannot be disabled on a per file basis.
76718 +
76719 +config PAX_RANDUSTACK
76720 + bool "Randomize user stack base"
76721 + depends on PAX_ASLR
76722 + help
76723 + By saying Y here the kernel will randomize every task's userland
76724 + stack. The randomization is done in two steps where the second
76725 + one may apply a big amount of shift to the top of the stack and
76726 + cause problems for programs that want to use lots of memory (more
76727 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76728 + For this reason the second step can be controlled by 'chpax' or
76729 + 'paxctl' on a per file basis.
76730 +
76731 +config PAX_RANDMMAP
76732 + bool "Randomize mmap() base"
76733 + depends on PAX_ASLR
76734 + help
76735 + By saying Y here the kernel will use a randomized base address for
76736 + mmap() requests that do not specify one themselves. As a result
76737 + all dynamically loaded libraries will appear at random addresses
76738 + and therefore be harder to exploit by a technique where an attacker
76739 + attempts to execute library code for his purposes (e.g. spawn a
76740 + shell from an exploited program that is running at an elevated
76741 + privilege level).
76742 +
76743 + Furthermore, if a program is relinked as a dynamic ELF file, its
76744 + base address will be randomized as well, completing the full
76745 + randomization of the address space layout. Attacking such programs
76746 + becomes a guess game. You can find an example of doing this at
76747 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76748 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76749 +
76750 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76751 + feature on a per file basis.
76752 +
76753 +endmenu
76754 +
76755 +menu "Miscellaneous hardening features"
76756 +
76757 +config PAX_MEMORY_SANITIZE
76758 + bool "Sanitize all freed memory"
76759 + depends on !HIBERNATION
76760 + help
76761 + By saying Y here the kernel will erase memory pages as soon as they
76762 + are freed. This in turn reduces the lifetime of data stored in the
76763 + pages, making it less likely that sensitive information such as
76764 + passwords, cryptographic secrets, etc stay in memory for too long.
76765 +
76766 + This is especially useful for programs whose runtime is short, long
76767 + lived processes and the kernel itself benefit from this as long as
76768 + they operate on whole memory pages and ensure timely freeing of pages
76769 + that may hold sensitive information.
76770 +
76771 + The tradeoff is performance impact, on a single CPU system kernel
76772 + compilation sees a 3% slowdown, other systems and workloads may vary
76773 + and you are advised to test this feature on your expected workload
76774 + before deploying it.
76775 +
76776 + Note that this feature does not protect data stored in live pages,
76777 + e.g., process memory swapped to disk may stay there for a long time.
76778 +
76779 +config PAX_MEMORY_STACKLEAK
76780 + bool "Sanitize kernel stack"
76781 + depends on X86
76782 + help
76783 + By saying Y here the kernel will erase the kernel stack before it
76784 + returns from a system call. This in turn reduces the information
76785 + that a kernel stack leak bug can reveal.
76786 +
76787 + Note that such a bug can still leak information that was put on
76788 + the stack by the current system call (the one eventually triggering
76789 + the bug) but traces of earlier system calls on the kernel stack
76790 + cannot leak anymore.
76791 +
76792 + The tradeoff is performance impact: on a single CPU system kernel
76793 + compilation sees a 1% slowdown, other systems and workloads may vary
76794 + and you are advised to test this feature on your expected workload
76795 + before deploying it.
76796 +
76797 + Note: full support for this feature requires gcc with plugin support
76798 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76799 + versions means that functions with large enough stack frames may
76800 + leave uninitialized memory behind that may be exposed to a later
76801 + syscall leaking the stack.
76802 +
76803 +config PAX_MEMORY_UDEREF
76804 + bool "Prevent invalid userland pointer dereference"
76805 + depends on X86 && !UML_X86 && !XEN
76806 + select PAX_PER_CPU_PGD if X86_64
76807 + help
76808 + By saying Y here the kernel will be prevented from dereferencing
76809 + userland pointers in contexts where the kernel expects only kernel
76810 + pointers. This is both a useful runtime debugging feature and a
76811 + security measure that prevents exploiting a class of kernel bugs.
76812 +
76813 + The tradeoff is that some virtualization solutions may experience
76814 + a huge slowdown and therefore you should not enable this feature
76815 + for kernels meant to run in such environments. Whether a given VM
76816 + solution is affected or not is best determined by simply trying it
76817 + out, the performance impact will be obvious right on boot as this
76818 + mechanism engages from very early on. A good rule of thumb is that
76819 + VMs running on CPUs without hardware virtualization support (i.e.,
76820 + the majority of IA-32 CPUs) will likely experience the slowdown.
76821 +
76822 +config PAX_REFCOUNT
76823 + bool "Prevent various kernel object reference counter overflows"
76824 + depends on GRKERNSEC && (X86 || SPARC64)
76825 + help
76826 + By saying Y here the kernel will detect and prevent overflowing
76827 + various (but not all) kinds of object reference counters. Such
76828 + overflows can normally occur due to bugs only and are often, if
76829 + not always, exploitable.
76830 +
76831 + The tradeoff is that data structures protected by an overflowed
76832 + refcount will never be freed and therefore will leak memory. Note
76833 + that this leak also happens even without this protection but in
76834 + that case the overflow can eventually trigger the freeing of the
76835 + data structure while it is still being used elsewhere, resulting
76836 + in the exploitable situation that this feature prevents.
76837 +
76838 + Since this has a negligible performance impact, you should enable
76839 + this feature.
76840 +
76841 +config PAX_USERCOPY
76842 + bool "Harden heap object copies between kernel and userland"
76843 + depends on X86 || PPC || SPARC || ARM
76844 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76845 + help
76846 + By saying Y here the kernel will enforce the size of heap objects
76847 + when they are copied in either direction between the kernel and
76848 + userland, even if only a part of the heap object is copied.
76849 +
76850 + Specifically, this checking prevents information leaking from the
76851 + kernel heap during kernel to userland copies (if the kernel heap
76852 + object is otherwise fully initialized) and prevents kernel heap
76853 + overflows during userland to kernel copies.
76854 +
76855 + Note that the current implementation provides the strictest bounds
76856 + checks for the SLUB allocator.
76857 +
76858 + Enabling this option also enables per-slab cache protection against
76859 + data in a given cache being copied into/out of via userland
76860 + accessors. Though the whitelist of regions will be reduced over
76861 + time, it notably protects important data structures like task structs.
76862 +
76863 + If frame pointers are enabled on x86, this option will also restrict
76864 + copies into and out of the kernel stack to local variables within a
76865 + single frame.
76866 +
76867 + Since this has a negligible performance impact, you should enable
76868 + this feature.
76869 +
76870 +endmenu
76871 +
76872 +endmenu
76873 +
76874 config KEYS
76875 bool "Enable access key retention support"
76876 help
76877 @@ -169,7 +790,7 @@ config INTEL_TXT
76878 config LSM_MMAP_MIN_ADDR
76879 int "Low address space for LSM to protect from user allocation"
76880 depends on SECURITY && SECURITY_SELINUX
76881 - default 32768 if ARM
76882 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76883 default 65536
76884 help
76885 This is the portion of low virtual memory which should be protected
76886 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76887 index 3783202..1852837 100644
76888 --- a/security/apparmor/lsm.c
76889 +++ b/security/apparmor/lsm.c
76890 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76891 return error;
76892 }
76893
76894 -static struct security_operations apparmor_ops = {
76895 +static struct security_operations apparmor_ops __read_only = {
76896 .name = "apparmor",
76897
76898 .ptrace_access_check = apparmor_ptrace_access_check,
76899 diff --git a/security/commoncap.c b/security/commoncap.c
76900 index ee4f848..a320c64 100644
76901 --- a/security/commoncap.c
76902 +++ b/security/commoncap.c
76903 @@ -28,6 +28,7 @@
76904 #include <linux/prctl.h>
76905 #include <linux/securebits.h>
76906 #include <linux/user_namespace.h>
76907 +#include <net/sock.h>
76908
76909 /*
76910 * If a non-root user executes a setuid-root binary in
76911 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76912
76913 int cap_netlink_recv(struct sk_buff *skb, int cap)
76914 {
76915 - if (!cap_raised(current_cap(), cap))
76916 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76917 return -EPERM;
76918 return 0;
76919 }
76920 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76921 {
76922 const struct cred *cred = current_cred();
76923
76924 + if (gr_acl_enable_at_secure())
76925 + return 1;
76926 +
76927 if (cred->uid != 0) {
76928 if (bprm->cap_effective)
76929 return 1;
76930 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76931 index 3ccf7ac..d73ad64 100644
76932 --- a/security/integrity/ima/ima.h
76933 +++ b/security/integrity/ima/ima.h
76934 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76935 extern spinlock_t ima_queue_lock;
76936
76937 struct ima_h_table {
76938 - atomic_long_t len; /* number of stored measurements in the list */
76939 - atomic_long_t violations;
76940 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76941 + atomic_long_unchecked_t violations;
76942 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76943 };
76944 extern struct ima_h_table ima_htable;
76945 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76946 index 88a2788..581ab92 100644
76947 --- a/security/integrity/ima/ima_api.c
76948 +++ b/security/integrity/ima/ima_api.c
76949 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76950 int result;
76951
76952 /* can overflow, only indicator */
76953 - atomic_long_inc(&ima_htable.violations);
76954 + atomic_long_inc_unchecked(&ima_htable.violations);
76955
76956 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76957 if (!entry) {
76958 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76959 index c5c5a72..2ad942f 100644
76960 --- a/security/integrity/ima/ima_audit.c
76961 +++ b/security/integrity/ima/ima_audit.c
76962 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76963 audit_log_format(ab, " name=");
76964 audit_log_untrustedstring(ab, fname);
76965 }
76966 - if (inode)
76967 - audit_log_format(ab, " dev=%s ino=%lu",
76968 - inode->i_sb->s_id, inode->i_ino);
76969 + if (inode) {
76970 + audit_log_format(ab, " dev=");
76971 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76972 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76973 + }
76974 audit_log_format(ab, " res=%d", !result ? 0 : 1);
76975 audit_log_end(ab);
76976 }
76977 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
76978 index e1aa2b4..52027bf 100644
76979 --- a/security/integrity/ima/ima_fs.c
76980 +++ b/security/integrity/ima/ima_fs.c
76981 @@ -28,12 +28,12 @@
76982 static int valid_policy = 1;
76983 #define TMPBUFLEN 12
76984 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
76985 - loff_t *ppos, atomic_long_t *val)
76986 + loff_t *ppos, atomic_long_unchecked_t *val)
76987 {
76988 char tmpbuf[TMPBUFLEN];
76989 ssize_t len;
76990
76991 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
76992 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
76993 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
76994 }
76995
76996 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
76997 index 55a6271..ad829c3 100644
76998 --- a/security/integrity/ima/ima_queue.c
76999 +++ b/security/integrity/ima/ima_queue.c
77000 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
77001 INIT_LIST_HEAD(&qe->later);
77002 list_add_tail_rcu(&qe->later, &ima_measurements);
77003
77004 - atomic_long_inc(&ima_htable.len);
77005 + atomic_long_inc_unchecked(&ima_htable.len);
77006 key = ima_hash_key(entry->digest);
77007 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
77008 return 0;
77009 diff --git a/security/keys/compat.c b/security/keys/compat.c
77010 index 4c48e13..7abdac9 100644
77011 --- a/security/keys/compat.c
77012 +++ b/security/keys/compat.c
77013 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
77014 if (ret == 0)
77015 goto no_payload_free;
77016
77017 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77018 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77019
77020 if (iov != iovstack)
77021 kfree(iov);
77022 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
77023 index 0b3f5d7..892c8a6 100644
77024 --- a/security/keys/keyctl.c
77025 +++ b/security/keys/keyctl.c
77026 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
77027 /*
77028 * Copy the iovec data from userspace
77029 */
77030 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77031 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
77032 unsigned ioc)
77033 {
77034 for (; ioc > 0; ioc--) {
77035 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77036 * If successful, 0 will be returned.
77037 */
77038 long keyctl_instantiate_key_common(key_serial_t id,
77039 - const struct iovec *payload_iov,
77040 + const struct iovec __user *payload_iov,
77041 unsigned ioc,
77042 size_t plen,
77043 key_serial_t ringid)
77044 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
77045 [0].iov_len = plen
77046 };
77047
77048 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
77049 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
77050 }
77051
77052 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
77053 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
77054 if (ret == 0)
77055 goto no_payload_free;
77056
77057 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77058 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77059
77060 if (iov != iovstack)
77061 kfree(iov);
77062 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
77063 index 37a7f3b..86dc19f 100644
77064 --- a/security/keys/keyring.c
77065 +++ b/security/keys/keyring.c
77066 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
77067 ret = -EFAULT;
77068
77069 for (loop = 0; loop < klist->nkeys; loop++) {
77070 + key_serial_t serial;
77071 key = klist->keys[loop];
77072 + serial = key->serial;
77073
77074 tmp = sizeof(key_serial_t);
77075 if (tmp > buflen)
77076 tmp = buflen;
77077
77078 - if (copy_to_user(buffer,
77079 - &key->serial,
77080 - tmp) != 0)
77081 + if (copy_to_user(buffer, &serial, tmp))
77082 goto error;
77083
77084 buflen -= tmp;
77085 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
77086 index 893af8a..ba9237c 100644
77087 --- a/security/lsm_audit.c
77088 +++ b/security/lsm_audit.c
77089 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77090 audit_log_d_path(ab, "path=", &a->u.path);
77091
77092 inode = a->u.path.dentry->d_inode;
77093 - if (inode)
77094 - audit_log_format(ab, " dev=%s ino=%lu",
77095 - inode->i_sb->s_id,
77096 - inode->i_ino);
77097 + if (inode) {
77098 + audit_log_format(ab, " dev=");
77099 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77100 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77101 + }
77102 break;
77103 }
77104 case LSM_AUDIT_DATA_DENTRY: {
77105 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77106 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
77107
77108 inode = a->u.dentry->d_inode;
77109 - if (inode)
77110 - audit_log_format(ab, " dev=%s ino=%lu",
77111 - inode->i_sb->s_id,
77112 - inode->i_ino);
77113 + if (inode) {
77114 + audit_log_format(ab, " dev=");
77115 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77116 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77117 + }
77118 break;
77119 }
77120 case LSM_AUDIT_DATA_INODE: {
77121 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77122 dentry->d_name.name);
77123 dput(dentry);
77124 }
77125 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
77126 - inode->i_ino);
77127 + audit_log_format(ab, " dev=");
77128 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77129 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77130 break;
77131 }
77132 case LSM_AUDIT_DATA_TASK:
77133 diff --git a/security/min_addr.c b/security/min_addr.c
77134 index f728728..6457a0c 100644
77135 --- a/security/min_addr.c
77136 +++ b/security/min_addr.c
77137 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
77138 */
77139 static void update_mmap_min_addr(void)
77140 {
77141 +#ifndef SPARC
77142 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
77143 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
77144 mmap_min_addr = dac_mmap_min_addr;
77145 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
77146 #else
77147 mmap_min_addr = dac_mmap_min_addr;
77148 #endif
77149 +#endif
77150 }
77151
77152 /*
77153 diff --git a/security/security.c b/security/security.c
77154 index e2f684a..8d62ef5 100644
77155 --- a/security/security.c
77156 +++ b/security/security.c
77157 @@ -26,8 +26,8 @@
77158 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
77159 CONFIG_DEFAULT_SECURITY;
77160
77161 -static struct security_operations *security_ops;
77162 -static struct security_operations default_security_ops = {
77163 +static struct security_operations *security_ops __read_only;
77164 +static struct security_operations default_security_ops __read_only = {
77165 .name = "default",
77166 };
77167
77168 @@ -68,7 +68,9 @@ int __init security_init(void)
77169
77170 void reset_security_ops(void)
77171 {
77172 + pax_open_kernel();
77173 security_ops = &default_security_ops;
77174 + pax_close_kernel();
77175 }
77176
77177 /* Save user chosen LSM */
77178 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
77179 index 1126c10..effb32b 100644
77180 --- a/security/selinux/hooks.c
77181 +++ b/security/selinux/hooks.c
77182 @@ -94,8 +94,6 @@
77183
77184 #define NUM_SEL_MNT_OPTS 5
77185
77186 -extern struct security_operations *security_ops;
77187 -
77188 /* SECMARK reference count */
77189 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
77190
77191 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
77192
77193 #endif
77194
77195 -static struct security_operations selinux_ops = {
77196 +static struct security_operations selinux_ops __read_only = {
77197 .name = "selinux",
77198
77199 .ptrace_access_check = selinux_ptrace_access_check,
77200 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
77201 index b43813c..74be837 100644
77202 --- a/security/selinux/include/xfrm.h
77203 +++ b/security/selinux/include/xfrm.h
77204 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
77205
77206 static inline void selinux_xfrm_notify_policyload(void)
77207 {
77208 - atomic_inc(&flow_cache_genid);
77209 + atomic_inc_unchecked(&flow_cache_genid);
77210 }
77211 #else
77212 static inline int selinux_xfrm_enabled(void)
77213 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
77214 index 7db62b4..ee4d949 100644
77215 --- a/security/smack/smack_lsm.c
77216 +++ b/security/smack/smack_lsm.c
77217 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
77218 return 0;
77219 }
77220
77221 -struct security_operations smack_ops = {
77222 +struct security_operations smack_ops __read_only = {
77223 .name = "smack",
77224
77225 .ptrace_access_check = smack_ptrace_access_check,
77226 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
77227 index 4b327b6..646c57a 100644
77228 --- a/security/tomoyo/tomoyo.c
77229 +++ b/security/tomoyo/tomoyo.c
77230 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
77231 * tomoyo_security_ops is a "struct security_operations" which is used for
77232 * registering TOMOYO.
77233 */
77234 -static struct security_operations tomoyo_security_ops = {
77235 +static struct security_operations tomoyo_security_ops __read_only = {
77236 .name = "tomoyo",
77237 .cred_alloc_blank = tomoyo_cred_alloc_blank,
77238 .cred_prepare = tomoyo_cred_prepare,
77239 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
77240 index 762af68..7103453 100644
77241 --- a/sound/aoa/codecs/onyx.c
77242 +++ b/sound/aoa/codecs/onyx.c
77243 @@ -54,7 +54,7 @@ struct onyx {
77244 spdif_locked:1,
77245 analog_locked:1,
77246 original_mute:2;
77247 - int open_count;
77248 + local_t open_count;
77249 struct codec_info *codec_info;
77250
77251 /* mutex serializes concurrent access to the device
77252 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
77253 struct onyx *onyx = cii->codec_data;
77254
77255 mutex_lock(&onyx->mutex);
77256 - onyx->open_count++;
77257 + local_inc(&onyx->open_count);
77258 mutex_unlock(&onyx->mutex);
77259
77260 return 0;
77261 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
77262 struct onyx *onyx = cii->codec_data;
77263
77264 mutex_lock(&onyx->mutex);
77265 - onyx->open_count--;
77266 - if (!onyx->open_count)
77267 + if (local_dec_and_test(&onyx->open_count))
77268 onyx->spdif_locked = onyx->analog_locked = 0;
77269 mutex_unlock(&onyx->mutex);
77270
77271 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
77272 index ffd2025..df062c9 100644
77273 --- a/sound/aoa/codecs/onyx.h
77274 +++ b/sound/aoa/codecs/onyx.h
77275 @@ -11,6 +11,7 @@
77276 #include <linux/i2c.h>
77277 #include <asm/pmac_low_i2c.h>
77278 #include <asm/prom.h>
77279 +#include <asm/local.h>
77280
77281 /* PCM3052 register definitions */
77282
77283 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
77284 index 3cc4b86..af0a951 100644
77285 --- a/sound/core/oss/pcm_oss.c
77286 +++ b/sound/core/oss/pcm_oss.c
77287 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
77288 if (in_kernel) {
77289 mm_segment_t fs;
77290 fs = snd_enter_user();
77291 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77292 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77293 snd_leave_user(fs);
77294 } else {
77295 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77296 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77297 }
77298 if (ret != -EPIPE && ret != -ESTRPIPE)
77299 break;
77300 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
77301 if (in_kernel) {
77302 mm_segment_t fs;
77303 fs = snd_enter_user();
77304 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77305 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77306 snd_leave_user(fs);
77307 } else {
77308 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77309 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77310 }
77311 if (ret == -EPIPE) {
77312 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
77313 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
77314 struct snd_pcm_plugin_channel *channels;
77315 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
77316 if (!in_kernel) {
77317 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
77318 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
77319 return -EFAULT;
77320 buf = runtime->oss.buffer;
77321 }
77322 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
77323 }
77324 } else {
77325 tmp = snd_pcm_oss_write2(substream,
77326 - (const char __force *)buf,
77327 + (const char __force_kernel *)buf,
77328 runtime->oss.period_bytes, 0);
77329 if (tmp <= 0)
77330 goto err;
77331 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
77332 struct snd_pcm_runtime *runtime = substream->runtime;
77333 snd_pcm_sframes_t frames, frames1;
77334 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
77335 - char __user *final_dst = (char __force __user *)buf;
77336 + char __user *final_dst = (char __force_user *)buf;
77337 if (runtime->oss.plugin_first) {
77338 struct snd_pcm_plugin_channel *channels;
77339 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
77340 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
77341 xfer += tmp;
77342 runtime->oss.buffer_used -= tmp;
77343 } else {
77344 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
77345 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
77346 runtime->oss.period_bytes, 0);
77347 if (tmp <= 0)
77348 goto err;
77349 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
77350 size1);
77351 size1 /= runtime->channels; /* frames */
77352 fs = snd_enter_user();
77353 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
77354 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
77355 snd_leave_user(fs);
77356 }
77357 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
77358 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
77359 index 91cdf94..4085161 100644
77360 --- a/sound/core/pcm_compat.c
77361 +++ b/sound/core/pcm_compat.c
77362 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
77363 int err;
77364
77365 fs = snd_enter_user();
77366 - err = snd_pcm_delay(substream, &delay);
77367 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
77368 snd_leave_user(fs);
77369 if (err < 0)
77370 return err;
77371 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
77372 index 25ed9fe..24c46e9 100644
77373 --- a/sound/core/pcm_native.c
77374 +++ b/sound/core/pcm_native.c
77375 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
77376 switch (substream->stream) {
77377 case SNDRV_PCM_STREAM_PLAYBACK:
77378 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
77379 - (void __user *)arg);
77380 + (void __force_user *)arg);
77381 break;
77382 case SNDRV_PCM_STREAM_CAPTURE:
77383 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
77384 - (void __user *)arg);
77385 + (void __force_user *)arg);
77386 break;
77387 default:
77388 result = -EINVAL;
77389 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
77390 index 5cf8d65..912a79c 100644
77391 --- a/sound/core/seq/seq_device.c
77392 +++ b/sound/core/seq/seq_device.c
77393 @@ -64,7 +64,7 @@ struct ops_list {
77394 int argsize; /* argument size */
77395
77396 /* operators */
77397 - struct snd_seq_dev_ops ops;
77398 + struct snd_seq_dev_ops *ops;
77399
77400 /* registred devices */
77401 struct list_head dev_list; /* list of devices */
77402 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
77403
77404 mutex_lock(&ops->reg_mutex);
77405 /* copy driver operators */
77406 - ops->ops = *entry;
77407 + ops->ops = entry;
77408 ops->driver |= DRIVER_LOADED;
77409 ops->argsize = argsize;
77410
77411 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
77412 dev->name, ops->id, ops->argsize, dev->argsize);
77413 return -EINVAL;
77414 }
77415 - if (ops->ops.init_device(dev) >= 0) {
77416 + if (ops->ops->init_device(dev) >= 0) {
77417 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
77418 ops->num_init_devices++;
77419 } else {
77420 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
77421 dev->name, ops->id, ops->argsize, dev->argsize);
77422 return -EINVAL;
77423 }
77424 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
77425 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
77426 dev->status = SNDRV_SEQ_DEVICE_FREE;
77427 dev->driver_data = NULL;
77428 ops->num_init_devices--;
77429 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
77430 index f24bf9a..1f7b67c 100644
77431 --- a/sound/drivers/mts64.c
77432 +++ b/sound/drivers/mts64.c
77433 @@ -29,6 +29,7 @@
77434 #include <sound/initval.h>
77435 #include <sound/rawmidi.h>
77436 #include <sound/control.h>
77437 +#include <asm/local.h>
77438
77439 #define CARD_NAME "Miditerminal 4140"
77440 #define DRIVER_NAME "MTS64"
77441 @@ -67,7 +68,7 @@ struct mts64 {
77442 struct pardevice *pardev;
77443 int pardev_claimed;
77444
77445 - int open_count;
77446 + local_t open_count;
77447 int current_midi_output_port;
77448 int current_midi_input_port;
77449 u8 mode[MTS64_NUM_INPUT_PORTS];
77450 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77451 {
77452 struct mts64 *mts = substream->rmidi->private_data;
77453
77454 - if (mts->open_count == 0) {
77455 + if (local_read(&mts->open_count) == 0) {
77456 /* We don't need a spinlock here, because this is just called
77457 if the device has not been opened before.
77458 So there aren't any IRQs from the device */
77459 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77460
77461 msleep(50);
77462 }
77463 - ++(mts->open_count);
77464 + local_inc(&mts->open_count);
77465
77466 return 0;
77467 }
77468 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77469 struct mts64 *mts = substream->rmidi->private_data;
77470 unsigned long flags;
77471
77472 - --(mts->open_count);
77473 - if (mts->open_count == 0) {
77474 + if (local_dec_return(&mts->open_count) == 0) {
77475 /* We need the spinlock_irqsave here because we can still
77476 have IRQs at this point */
77477 spin_lock_irqsave(&mts->lock, flags);
77478 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77479
77480 msleep(500);
77481
77482 - } else if (mts->open_count < 0)
77483 - mts->open_count = 0;
77484 + } else if (local_read(&mts->open_count) < 0)
77485 + local_set(&mts->open_count, 0);
77486
77487 return 0;
77488 }
77489 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77490 index b953fb4..1999c01 100644
77491 --- a/sound/drivers/opl4/opl4_lib.c
77492 +++ b/sound/drivers/opl4/opl4_lib.c
77493 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77494 MODULE_DESCRIPTION("OPL4 driver");
77495 MODULE_LICENSE("GPL");
77496
77497 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77498 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77499 {
77500 int timeout = 10;
77501 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77502 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77503 index f664823..590c745 100644
77504 --- a/sound/drivers/portman2x4.c
77505 +++ b/sound/drivers/portman2x4.c
77506 @@ -48,6 +48,7 @@
77507 #include <sound/initval.h>
77508 #include <sound/rawmidi.h>
77509 #include <sound/control.h>
77510 +#include <asm/local.h>
77511
77512 #define CARD_NAME "Portman 2x4"
77513 #define DRIVER_NAME "portman"
77514 @@ -85,7 +86,7 @@ struct portman {
77515 struct pardevice *pardev;
77516 int pardev_claimed;
77517
77518 - int open_count;
77519 + local_t open_count;
77520 int mode[PORTMAN_NUM_INPUT_PORTS];
77521 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77522 };
77523 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77524 index 87657dd..a8268d4 100644
77525 --- a/sound/firewire/amdtp.c
77526 +++ b/sound/firewire/amdtp.c
77527 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77528 ptr = s->pcm_buffer_pointer + data_blocks;
77529 if (ptr >= pcm->runtime->buffer_size)
77530 ptr -= pcm->runtime->buffer_size;
77531 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77532 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77533
77534 s->pcm_period_pointer += data_blocks;
77535 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77536 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77537 */
77538 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77539 {
77540 - ACCESS_ONCE(s->source_node_id_field) =
77541 + ACCESS_ONCE_RW(s->source_node_id_field) =
77542 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77543 }
77544 EXPORT_SYMBOL(amdtp_out_stream_update);
77545 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77546 index 537a9cb..8e8c8e9 100644
77547 --- a/sound/firewire/amdtp.h
77548 +++ b/sound/firewire/amdtp.h
77549 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77550 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77551 struct snd_pcm_substream *pcm)
77552 {
77553 - ACCESS_ONCE(s->pcm) = pcm;
77554 + ACCESS_ONCE_RW(s->pcm) = pcm;
77555 }
77556
77557 /**
77558 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77559 index cd094ec..eca1277 100644
77560 --- a/sound/firewire/isight.c
77561 +++ b/sound/firewire/isight.c
77562 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77563 ptr += count;
77564 if (ptr >= runtime->buffer_size)
77565 ptr -= runtime->buffer_size;
77566 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77567 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77568
77569 isight->period_counter += count;
77570 if (isight->period_counter >= runtime->period_size) {
77571 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77572 if (err < 0)
77573 return err;
77574
77575 - ACCESS_ONCE(isight->pcm_active) = true;
77576 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77577
77578 return 0;
77579 }
77580 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77581 {
77582 struct isight *isight = substream->private_data;
77583
77584 - ACCESS_ONCE(isight->pcm_active) = false;
77585 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77586
77587 mutex_lock(&isight->mutex);
77588 isight_stop_streaming(isight);
77589 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77590
77591 switch (cmd) {
77592 case SNDRV_PCM_TRIGGER_START:
77593 - ACCESS_ONCE(isight->pcm_running) = true;
77594 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77595 break;
77596 case SNDRV_PCM_TRIGGER_STOP:
77597 - ACCESS_ONCE(isight->pcm_running) = false;
77598 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77599 break;
77600 default:
77601 return -EINVAL;
77602 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77603 index c94578d..0794ac1 100644
77604 --- a/sound/isa/cmi8330.c
77605 +++ b/sound/isa/cmi8330.c
77606 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77607
77608 struct snd_pcm *pcm;
77609 struct snd_cmi8330_stream {
77610 - struct snd_pcm_ops ops;
77611 + snd_pcm_ops_no_const ops;
77612 snd_pcm_open_callback_t open;
77613 void *private_data; /* sb or wss */
77614 } streams[2];
77615 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77616 index 733b014..56ce96f 100644
77617 --- a/sound/oss/sb_audio.c
77618 +++ b/sound/oss/sb_audio.c
77619 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77620 buf16 = (signed short *)(localbuf + localoffs);
77621 while (c)
77622 {
77623 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77624 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77625 if (copy_from_user(lbuf8,
77626 userbuf+useroffs + p,
77627 locallen))
77628 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77629 index 09d4648..cf234c7 100644
77630 --- a/sound/oss/swarm_cs4297a.c
77631 +++ b/sound/oss/swarm_cs4297a.c
77632 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77633 {
77634 struct cs4297a_state *s;
77635 u32 pwr, id;
77636 - mm_segment_t fs;
77637 int rval;
77638 #ifndef CONFIG_BCM_CS4297A_CSWARM
77639 u64 cfg;
77640 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77641 if (!rval) {
77642 char *sb1250_duart_present;
77643
77644 +#if 0
77645 + mm_segment_t fs;
77646 fs = get_fs();
77647 set_fs(KERNEL_DS);
77648 -#if 0
77649 val = SOUND_MASK_LINE;
77650 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77651 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77652 val = initvol[i].vol;
77653 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77654 }
77655 + set_fs(fs);
77656 // cs4297a_write_ac97(s, 0x18, 0x0808);
77657 #else
77658 // cs4297a_write_ac97(s, 0x5e, 0x180);
77659 cs4297a_write_ac97(s, 0x02, 0x0808);
77660 cs4297a_write_ac97(s, 0x18, 0x0808);
77661 #endif
77662 - set_fs(fs);
77663
77664 list_add(&s->list, &cs4297a_devs);
77665
77666 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77667 index 5644711..a2aebc1 100644
77668 --- a/sound/pci/hda/hda_codec.h
77669 +++ b/sound/pci/hda/hda_codec.h
77670 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77671 /* notify power-up/down from codec to controller */
77672 void (*pm_notify)(struct hda_bus *bus);
77673 #endif
77674 -};
77675 +} __no_const;
77676
77677 /* template to pass to the bus constructor */
77678 struct hda_bus_template {
77679 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77680 #endif
77681 void (*reboot_notify)(struct hda_codec *codec);
77682 };
77683 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77684
77685 /* record for amp information cache */
77686 struct hda_cache_head {
77687 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77688 struct snd_pcm_substream *substream);
77689 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77690 struct snd_pcm_substream *substream);
77691 -};
77692 +} __no_const;
77693
77694 /* PCM information for each substream */
77695 struct hda_pcm_stream {
77696 @@ -801,7 +802,7 @@ struct hda_codec {
77697 const char *modelname; /* model name for preset */
77698
77699 /* set by patch */
77700 - struct hda_codec_ops patch_ops;
77701 + hda_codec_ops_no_const patch_ops;
77702
77703 /* PCM to create, set by patch_ops.build_pcms callback */
77704 unsigned int num_pcms;
77705 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77706 index 0da778a..bc38b84 100644
77707 --- a/sound/pci/ice1712/ice1712.h
77708 +++ b/sound/pci/ice1712/ice1712.h
77709 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77710 unsigned int mask_flags; /* total mask bits */
77711 struct snd_akm4xxx_ops {
77712 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77713 - } ops;
77714 + } __no_const ops;
77715 };
77716
77717 struct snd_ice1712_spdif {
77718 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77719 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77720 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77721 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77722 - } ops;
77723 + } __no_const ops;
77724 };
77725
77726
77727 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77728 index 03ee4e3..be86b46 100644
77729 --- a/sound/pci/ymfpci/ymfpci_main.c
77730 +++ b/sound/pci/ymfpci/ymfpci_main.c
77731 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77732 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77733 break;
77734 }
77735 - if (atomic_read(&chip->interrupt_sleep_count)) {
77736 - atomic_set(&chip->interrupt_sleep_count, 0);
77737 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77738 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77739 wake_up(&chip->interrupt_sleep);
77740 }
77741 __end:
77742 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77743 continue;
77744 init_waitqueue_entry(&wait, current);
77745 add_wait_queue(&chip->interrupt_sleep, &wait);
77746 - atomic_inc(&chip->interrupt_sleep_count);
77747 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77748 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77749 remove_wait_queue(&chip->interrupt_sleep, &wait);
77750 }
77751 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77752 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77753 spin_unlock(&chip->reg_lock);
77754
77755 - if (atomic_read(&chip->interrupt_sleep_count)) {
77756 - atomic_set(&chip->interrupt_sleep_count, 0);
77757 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77758 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77759 wake_up(&chip->interrupt_sleep);
77760 }
77761 }
77762 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77763 spin_lock_init(&chip->reg_lock);
77764 spin_lock_init(&chip->voice_lock);
77765 init_waitqueue_head(&chip->interrupt_sleep);
77766 - atomic_set(&chip->interrupt_sleep_count, 0);
77767 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77768 chip->card = card;
77769 chip->pci = pci;
77770 chip->irq = -1;
77771 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77772 index ee15337..e2187a6 100644
77773 --- a/sound/soc/soc-pcm.c
77774 +++ b/sound/soc/soc-pcm.c
77775 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77776 }
77777
77778 /* ASoC PCM operations */
77779 -static struct snd_pcm_ops soc_pcm_ops = {
77780 +static snd_pcm_ops_no_const soc_pcm_ops = {
77781 .open = soc_pcm_open,
77782 .close = soc_pcm_close,
77783 .hw_params = soc_pcm_hw_params,
77784 diff --git a/sound/usb/card.h b/sound/usb/card.h
77785 index a39edcc..1014050 100644
77786 --- a/sound/usb/card.h
77787 +++ b/sound/usb/card.h
77788 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77789 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77790 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77791 };
77792 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77793
77794 struct snd_usb_substream {
77795 struct snd_usb_stream *stream;
77796 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77797 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77798 spinlock_t lock;
77799
77800 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77801 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77802 int last_frame_number; /* stored frame number */
77803 int last_delay; /* stored delay */
77804 };
77805 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77806 new file mode 100644
77807 index 0000000..29b6b75
77808 --- /dev/null
77809 +++ b/tools/gcc/Makefile
77810 @@ -0,0 +1,21 @@
77811 +#CC := gcc
77812 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77813 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77814 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77815 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
77816 +
77817 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99
77818 +
77819 +hostlibs-y := constify_plugin.so
77820 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77821 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77822 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77823 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77824 +
77825 +always := $(hostlibs-y)
77826 +
77827 +constify_plugin-objs := constify_plugin.o
77828 +stackleak_plugin-objs := stackleak_plugin.o
77829 +kallocstat_plugin-objs := kallocstat_plugin.o
77830 +kernexec_plugin-objs := kernexec_plugin.o
77831 +checker_plugin-objs := checker_plugin.o
77832 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77833 new file mode 100644
77834 index 0000000..d41b5af
77835 --- /dev/null
77836 +++ b/tools/gcc/checker_plugin.c
77837 @@ -0,0 +1,171 @@
77838 +/*
77839 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77840 + * Licensed under the GPL v2
77841 + *
77842 + * Note: the choice of the license means that the compilation process is
77843 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77844 + * but for the kernel it doesn't matter since it doesn't link against
77845 + * any of the gcc libraries
77846 + *
77847 + * gcc plugin to implement various sparse (source code checker) features
77848 + *
77849 + * TODO:
77850 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77851 + *
77852 + * BUGS:
77853 + * - none known
77854 + */
77855 +#include "gcc-plugin.h"
77856 +#include "config.h"
77857 +#include "system.h"
77858 +#include "coretypes.h"
77859 +#include "tree.h"
77860 +#include "tree-pass.h"
77861 +#include "flags.h"
77862 +#include "intl.h"
77863 +#include "toplev.h"
77864 +#include "plugin.h"
77865 +//#include "expr.h" where are you...
77866 +#include "diagnostic.h"
77867 +#include "plugin-version.h"
77868 +#include "tm.h"
77869 +#include "function.h"
77870 +#include "basic-block.h"
77871 +#include "gimple.h"
77872 +#include "rtl.h"
77873 +#include "emit-rtl.h"
77874 +#include "tree-flow.h"
77875 +#include "target.h"
77876 +
77877 +extern void c_register_addr_space (const char *str, addr_space_t as);
77878 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77879 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77880 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77881 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77882 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77883 +
77884 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77885 +extern rtx emit_move_insn(rtx x, rtx y);
77886 +
77887 +int plugin_is_GPL_compatible;
77888 +
77889 +static struct plugin_info checker_plugin_info = {
77890 + .version = "201111150100",
77891 +};
77892 +
77893 +#define ADDR_SPACE_KERNEL 0
77894 +#define ADDR_SPACE_FORCE_KERNEL 1
77895 +#define ADDR_SPACE_USER 2
77896 +#define ADDR_SPACE_FORCE_USER 3
77897 +#define ADDR_SPACE_IOMEM 0
77898 +#define ADDR_SPACE_FORCE_IOMEM 0
77899 +#define ADDR_SPACE_PERCPU 0
77900 +#define ADDR_SPACE_FORCE_PERCPU 0
77901 +#define ADDR_SPACE_RCU 0
77902 +#define ADDR_SPACE_FORCE_RCU 0
77903 +
77904 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77905 +{
77906 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77907 +}
77908 +
77909 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77910 +{
77911 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77912 +}
77913 +
77914 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77915 +{
77916 + return default_addr_space_valid_pointer_mode(mode, as);
77917 +}
77918 +
77919 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77920 +{
77921 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77922 +}
77923 +
77924 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77925 +{
77926 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77927 +}
77928 +
77929 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77930 +{
77931 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77932 + return true;
77933 +
77934 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77935 + return true;
77936 +
77937 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77938 + return true;
77939 +
77940 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77941 + return true;
77942 +
77943 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77944 + return true;
77945 +
77946 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77947 + return true;
77948 +
77949 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77950 + return true;
77951 +
77952 + return subset == superset;
77953 +}
77954 +
77955 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77956 +{
77957 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77958 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77959 +
77960 + return op;
77961 +}
77962 +
77963 +static void register_checker_address_spaces(void *event_data, void *data)
77964 +{
77965 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77966 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77967 + c_register_addr_space("__user", ADDR_SPACE_USER);
77968 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77969 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77970 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77971 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77972 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77973 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77974 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77975 +
77976 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77977 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77978 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77979 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77980 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77981 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77982 + targetm.addr_space.convert = checker_addr_space_convert;
77983 +}
77984 +
77985 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77986 +{
77987 + const char * const plugin_name = plugin_info->base_name;
77988 + const int argc = plugin_info->argc;
77989 + const struct plugin_argument * const argv = plugin_info->argv;
77990 + int i;
77991 +
77992 + if (!plugin_default_version_check(version, &gcc_version)) {
77993 + error(G_("incompatible gcc/plugin versions"));
77994 + return 1;
77995 + }
77996 +
77997 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77998 +
77999 + for (i = 0; i < argc; ++i)
78000 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78001 +
78002 + if (TARGET_64BIT == 0)
78003 + return 0;
78004 +
78005 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
78006 +
78007 + return 0;
78008 +}
78009 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
78010 new file mode 100644
78011 index 0000000..704a564
78012 --- /dev/null
78013 +++ b/tools/gcc/constify_plugin.c
78014 @@ -0,0 +1,303 @@
78015 +/*
78016 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
78017 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
78018 + * Licensed under the GPL v2, or (at your option) v3
78019 + *
78020 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
78021 + *
78022 + * Homepage:
78023 + * http://www.grsecurity.net/~ephox/const_plugin/
78024 + *
78025 + * Usage:
78026 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
78027 + * $ gcc -fplugin=constify_plugin.so test.c -O2
78028 + */
78029 +
78030 +#include "gcc-plugin.h"
78031 +#include "config.h"
78032 +#include "system.h"
78033 +#include "coretypes.h"
78034 +#include "tree.h"
78035 +#include "tree-pass.h"
78036 +#include "flags.h"
78037 +#include "intl.h"
78038 +#include "toplev.h"
78039 +#include "plugin.h"
78040 +#include "diagnostic.h"
78041 +#include "plugin-version.h"
78042 +#include "tm.h"
78043 +#include "function.h"
78044 +#include "basic-block.h"
78045 +#include "gimple.h"
78046 +#include "rtl.h"
78047 +#include "emit-rtl.h"
78048 +#include "tree-flow.h"
78049 +
78050 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
78051 +
78052 +int plugin_is_GPL_compatible;
78053 +
78054 +static struct plugin_info const_plugin_info = {
78055 + .version = "201111150100",
78056 + .help = "no-constify\tturn off constification\n",
78057 +};
78058 +
78059 +static void constify_type(tree type);
78060 +static bool walk_struct(tree node);
78061 +
78062 +static tree deconstify_type(tree old_type)
78063 +{
78064 + tree new_type, field;
78065 +
78066 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
78067 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
78068 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
78069 + DECL_FIELD_CONTEXT(field) = new_type;
78070 + TYPE_READONLY(new_type) = 0;
78071 + C_TYPE_FIELDS_READONLY(new_type) = 0;
78072 + return new_type;
78073 +}
78074 +
78075 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78076 +{
78077 + tree type;
78078 +
78079 + *no_add_attrs = true;
78080 + if (TREE_CODE(*node) == FUNCTION_DECL) {
78081 + error("%qE attribute does not apply to functions", name);
78082 + return NULL_TREE;
78083 + }
78084 +
78085 + if (TREE_CODE(*node) == VAR_DECL) {
78086 + error("%qE attribute does not apply to variables", name);
78087 + return NULL_TREE;
78088 + }
78089 +
78090 + if (TYPE_P(*node)) {
78091 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
78092 + *no_add_attrs = false;
78093 + else
78094 + error("%qE attribute applies to struct and union types only", name);
78095 + return NULL_TREE;
78096 + }
78097 +
78098 + type = TREE_TYPE(*node);
78099 +
78100 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
78101 + error("%qE attribute applies to struct and union types only", name);
78102 + return NULL_TREE;
78103 + }
78104 +
78105 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
78106 + error("%qE attribute is already applied to the type", name);
78107 + return NULL_TREE;
78108 + }
78109 +
78110 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
78111 + error("%qE attribute used on type that is not constified", name);
78112 + return NULL_TREE;
78113 + }
78114 +
78115 + if (TREE_CODE(*node) == TYPE_DECL) {
78116 + TREE_TYPE(*node) = deconstify_type(type);
78117 + TREE_READONLY(*node) = 0;
78118 + return NULL_TREE;
78119 + }
78120 +
78121 + return NULL_TREE;
78122 +}
78123 +
78124 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78125 +{
78126 + *no_add_attrs = true;
78127 + if (!TYPE_P(*node)) {
78128 + error("%qE attribute applies to types only", name);
78129 + return NULL_TREE;
78130 + }
78131 +
78132 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
78133 + error("%qE attribute applies to struct and union types only", name);
78134 + return NULL_TREE;
78135 + }
78136 +
78137 + *no_add_attrs = false;
78138 + constify_type(*node);
78139 + return NULL_TREE;
78140 +}
78141 +
78142 +static struct attribute_spec no_const_attr = {
78143 + .name = "no_const",
78144 + .min_length = 0,
78145 + .max_length = 0,
78146 + .decl_required = false,
78147 + .type_required = false,
78148 + .function_type_required = false,
78149 + .handler = handle_no_const_attribute,
78150 +#if BUILDING_GCC_VERSION >= 4007
78151 + .affects_type_identity = true
78152 +#endif
78153 +};
78154 +
78155 +static struct attribute_spec do_const_attr = {
78156 + .name = "do_const",
78157 + .min_length = 0,
78158 + .max_length = 0,
78159 + .decl_required = false,
78160 + .type_required = false,
78161 + .function_type_required = false,
78162 + .handler = handle_do_const_attribute,
78163 +#if BUILDING_GCC_VERSION >= 4007
78164 + .affects_type_identity = true
78165 +#endif
78166 +};
78167 +
78168 +static void register_attributes(void *event_data, void *data)
78169 +{
78170 + register_attribute(&no_const_attr);
78171 + register_attribute(&do_const_attr);
78172 +}
78173 +
78174 +static void constify_type(tree type)
78175 +{
78176 + TYPE_READONLY(type) = 1;
78177 + C_TYPE_FIELDS_READONLY(type) = 1;
78178 +}
78179 +
78180 +static bool is_fptr(tree field)
78181 +{
78182 + tree ptr = TREE_TYPE(field);
78183 +
78184 + if (TREE_CODE(ptr) != POINTER_TYPE)
78185 + return false;
78186 +
78187 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
78188 +}
78189 +
78190 +static bool walk_struct(tree node)
78191 +{
78192 + tree field;
78193 +
78194 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
78195 + return false;
78196 +
78197 + if (TYPE_FIELDS(node) == NULL_TREE)
78198 + return false;
78199 +
78200 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
78201 + tree type = TREE_TYPE(field);
78202 + enum tree_code code = TREE_CODE(type);
78203 + if (code == RECORD_TYPE || code == UNION_TYPE) {
78204 + if (!(walk_struct(type)))
78205 + return false;
78206 + } else if (!is_fptr(field) && !TREE_READONLY(field))
78207 + return false;
78208 + }
78209 + return true;
78210 +}
78211 +
78212 +static void finish_type(void *event_data, void *data)
78213 +{
78214 + tree type = (tree)event_data;
78215 +
78216 + if (type == NULL_TREE)
78217 + return;
78218 +
78219 + if (TYPE_READONLY(type))
78220 + return;
78221 +
78222 + if (walk_struct(type))
78223 + constify_type(type);
78224 +}
78225 +
78226 +static unsigned int check_local_variables(void);
78227 +
78228 +struct gimple_opt_pass pass_local_variable = {
78229 + {
78230 + .type = GIMPLE_PASS,
78231 + .name = "check_local_variables",
78232 + .gate = NULL,
78233 + .execute = check_local_variables,
78234 + .sub = NULL,
78235 + .next = NULL,
78236 + .static_pass_number = 0,
78237 + .tv_id = TV_NONE,
78238 + .properties_required = 0,
78239 + .properties_provided = 0,
78240 + .properties_destroyed = 0,
78241 + .todo_flags_start = 0,
78242 + .todo_flags_finish = 0
78243 + }
78244 +};
78245 +
78246 +static unsigned int check_local_variables(void)
78247 +{
78248 + tree var;
78249 + referenced_var_iterator rvi;
78250 +
78251 +#if BUILDING_GCC_VERSION == 4005
78252 + FOR_EACH_REFERENCED_VAR(var, rvi) {
78253 +#else
78254 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
78255 +#endif
78256 + tree type = TREE_TYPE(var);
78257 +
78258 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
78259 + continue;
78260 +
78261 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
78262 + continue;
78263 +
78264 + if (!TYPE_READONLY(type))
78265 + continue;
78266 +
78267 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
78268 +// continue;
78269 +
78270 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
78271 +// continue;
78272 +
78273 + if (walk_struct(type)) {
78274 + error("constified variable %qE cannot be local", var);
78275 + return 1;
78276 + }
78277 + }
78278 + return 0;
78279 +}
78280 +
78281 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78282 +{
78283 + const char * const plugin_name = plugin_info->base_name;
78284 + const int argc = plugin_info->argc;
78285 + const struct plugin_argument * const argv = plugin_info->argv;
78286 + int i;
78287 + bool constify = true;
78288 +
78289 + struct register_pass_info local_variable_pass_info = {
78290 + .pass = &pass_local_variable.pass,
78291 + .reference_pass_name = "*referenced_vars",
78292 + .ref_pass_instance_number = 0,
78293 + .pos_op = PASS_POS_INSERT_AFTER
78294 + };
78295 +
78296 + if (!plugin_default_version_check(version, &gcc_version)) {
78297 + error(G_("incompatible gcc/plugin versions"));
78298 + return 1;
78299 + }
78300 +
78301 + for (i = 0; i < argc; ++i) {
78302 + if (!(strcmp(argv[i].key, "no-constify"))) {
78303 + constify = false;
78304 + continue;
78305 + }
78306 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78307 + }
78308 +
78309 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
78310 + if (constify) {
78311 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
78312 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
78313 + }
78314 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
78315 +
78316 + return 0;
78317 +}
78318 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
78319 new file mode 100644
78320 index 0000000..a5eabce
78321 --- /dev/null
78322 +++ b/tools/gcc/kallocstat_plugin.c
78323 @@ -0,0 +1,167 @@
78324 +/*
78325 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78326 + * Licensed under the GPL v2
78327 + *
78328 + * Note: the choice of the license means that the compilation process is
78329 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78330 + * but for the kernel it doesn't matter since it doesn't link against
78331 + * any of the gcc libraries
78332 + *
78333 + * gcc plugin to find the distribution of k*alloc sizes
78334 + *
78335 + * TODO:
78336 + *
78337 + * BUGS:
78338 + * - none known
78339 + */
78340 +#include "gcc-plugin.h"
78341 +#include "config.h"
78342 +#include "system.h"
78343 +#include "coretypes.h"
78344 +#include "tree.h"
78345 +#include "tree-pass.h"
78346 +#include "flags.h"
78347 +#include "intl.h"
78348 +#include "toplev.h"
78349 +#include "plugin.h"
78350 +//#include "expr.h" where are you...
78351 +#include "diagnostic.h"
78352 +#include "plugin-version.h"
78353 +#include "tm.h"
78354 +#include "function.h"
78355 +#include "basic-block.h"
78356 +#include "gimple.h"
78357 +#include "rtl.h"
78358 +#include "emit-rtl.h"
78359 +
78360 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78361 +
78362 +int plugin_is_GPL_compatible;
78363 +
78364 +static const char * const kalloc_functions[] = {
78365 + "__kmalloc",
78366 + "kmalloc",
78367 + "kmalloc_large",
78368 + "kmalloc_node",
78369 + "kmalloc_order",
78370 + "kmalloc_order_trace",
78371 + "kmalloc_slab",
78372 + "kzalloc",
78373 + "kzalloc_node",
78374 +};
78375 +
78376 +static struct plugin_info kallocstat_plugin_info = {
78377 + .version = "201111150100",
78378 +};
78379 +
78380 +static unsigned int execute_kallocstat(void);
78381 +
78382 +static struct gimple_opt_pass kallocstat_pass = {
78383 + .pass = {
78384 + .type = GIMPLE_PASS,
78385 + .name = "kallocstat",
78386 + .gate = NULL,
78387 + .execute = execute_kallocstat,
78388 + .sub = NULL,
78389 + .next = NULL,
78390 + .static_pass_number = 0,
78391 + .tv_id = TV_NONE,
78392 + .properties_required = 0,
78393 + .properties_provided = 0,
78394 + .properties_destroyed = 0,
78395 + .todo_flags_start = 0,
78396 + .todo_flags_finish = 0
78397 + }
78398 +};
78399 +
78400 +static bool is_kalloc(const char *fnname)
78401 +{
78402 + size_t i;
78403 +
78404 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
78405 + if (!strcmp(fnname, kalloc_functions[i]))
78406 + return true;
78407 + return false;
78408 +}
78409 +
78410 +static unsigned int execute_kallocstat(void)
78411 +{
78412 + basic_block bb;
78413 +
78414 + // 1. loop through BBs and GIMPLE statements
78415 + FOR_EACH_BB(bb) {
78416 + gimple_stmt_iterator gsi;
78417 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78418 + // gimple match:
78419 + tree fndecl, size;
78420 + gimple call_stmt;
78421 + const char *fnname;
78422 +
78423 + // is it a call
78424 + call_stmt = gsi_stmt(gsi);
78425 + if (!is_gimple_call(call_stmt))
78426 + continue;
78427 + fndecl = gimple_call_fndecl(call_stmt);
78428 + if (fndecl == NULL_TREE)
78429 + continue;
78430 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78431 + continue;
78432 +
78433 + // is it a call to k*alloc
78434 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78435 + if (!is_kalloc(fnname))
78436 + continue;
78437 +
78438 + // is the size arg the result of a simple const assignment
78439 + size = gimple_call_arg(call_stmt, 0);
78440 + while (true) {
78441 + gimple def_stmt;
78442 + expanded_location xloc;
78443 + size_t size_val;
78444 +
78445 + if (TREE_CODE(size) != SSA_NAME)
78446 + break;
78447 + def_stmt = SSA_NAME_DEF_STMT(size);
78448 + if (!def_stmt || !is_gimple_assign(def_stmt))
78449 + break;
78450 + if (gimple_num_ops(def_stmt) != 2)
78451 + break;
78452 + size = gimple_assign_rhs1(def_stmt);
78453 + if (!TREE_CONSTANT(size))
78454 + continue;
78455 + xloc = expand_location(gimple_location(def_stmt));
78456 + if (!xloc.file)
78457 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78458 + size_val = TREE_INT_CST_LOW(size);
78459 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78460 + break;
78461 + }
78462 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78463 +//debug_tree(gimple_call_fn(call_stmt));
78464 +//print_node(stderr, "pax", fndecl, 4);
78465 + }
78466 + }
78467 +
78468 + return 0;
78469 +}
78470 +
78471 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78472 +{
78473 + const char * const plugin_name = plugin_info->base_name;
78474 + struct register_pass_info kallocstat_pass_info = {
78475 + .pass = &kallocstat_pass.pass,
78476 + .reference_pass_name = "ssa",
78477 + .ref_pass_instance_number = 0,
78478 + .pos_op = PASS_POS_INSERT_AFTER
78479 + };
78480 +
78481 + if (!plugin_default_version_check(version, &gcc_version)) {
78482 + error(G_("incompatible gcc/plugin versions"));
78483 + return 1;
78484 + }
78485 +
78486 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78487 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78488 +
78489 + return 0;
78490 +}
78491 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78492 new file mode 100644
78493 index 0000000..008f159
78494 --- /dev/null
78495 +++ b/tools/gcc/kernexec_plugin.c
78496 @@ -0,0 +1,427 @@
78497 +/*
78498 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78499 + * Licensed under the GPL v2
78500 + *
78501 + * Note: the choice of the license means that the compilation process is
78502 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78503 + * but for the kernel it doesn't matter since it doesn't link against
78504 + * any of the gcc libraries
78505 + *
78506 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78507 + *
78508 + * TODO:
78509 + *
78510 + * BUGS:
78511 + * - none known
78512 + */
78513 +#include "gcc-plugin.h"
78514 +#include "config.h"
78515 +#include "system.h"
78516 +#include "coretypes.h"
78517 +#include "tree.h"
78518 +#include "tree-pass.h"
78519 +#include "flags.h"
78520 +#include "intl.h"
78521 +#include "toplev.h"
78522 +#include "plugin.h"
78523 +//#include "expr.h" where are you...
78524 +#include "diagnostic.h"
78525 +#include "plugin-version.h"
78526 +#include "tm.h"
78527 +#include "function.h"
78528 +#include "basic-block.h"
78529 +#include "gimple.h"
78530 +#include "rtl.h"
78531 +#include "emit-rtl.h"
78532 +#include "tree-flow.h"
78533 +
78534 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78535 +extern rtx emit_move_insn(rtx x, rtx y);
78536 +
78537 +int plugin_is_GPL_compatible;
78538 +
78539 +static struct plugin_info kernexec_plugin_info = {
78540 + .version = "201111291120",
78541 + .help = "method=[bts|or]\tinstrumentation method\n"
78542 +};
78543 +
78544 +static unsigned int execute_kernexec_reload(void);
78545 +static unsigned int execute_kernexec_fptr(void);
78546 +static unsigned int execute_kernexec_retaddr(void);
78547 +static bool kernexec_cmodel_check(void);
78548 +
78549 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
78550 +static void (*kernexec_instrument_retaddr)(rtx);
78551 +
78552 +static struct gimple_opt_pass kernexec_reload_pass = {
78553 + .pass = {
78554 + .type = GIMPLE_PASS,
78555 + .name = "kernexec_reload",
78556 + .gate = kernexec_cmodel_check,
78557 + .execute = execute_kernexec_reload,
78558 + .sub = NULL,
78559 + .next = NULL,
78560 + .static_pass_number = 0,
78561 + .tv_id = TV_NONE,
78562 + .properties_required = 0,
78563 + .properties_provided = 0,
78564 + .properties_destroyed = 0,
78565 + .todo_flags_start = 0,
78566 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78567 + }
78568 +};
78569 +
78570 +static struct gimple_opt_pass kernexec_fptr_pass = {
78571 + .pass = {
78572 + .type = GIMPLE_PASS,
78573 + .name = "kernexec_fptr",
78574 + .gate = kernexec_cmodel_check,
78575 + .execute = execute_kernexec_fptr,
78576 + .sub = NULL,
78577 + .next = NULL,
78578 + .static_pass_number = 0,
78579 + .tv_id = TV_NONE,
78580 + .properties_required = 0,
78581 + .properties_provided = 0,
78582 + .properties_destroyed = 0,
78583 + .todo_flags_start = 0,
78584 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78585 + }
78586 +};
78587 +
78588 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78589 + .pass = {
78590 + .type = RTL_PASS,
78591 + .name = "kernexec_retaddr",
78592 + .gate = kernexec_cmodel_check,
78593 + .execute = execute_kernexec_retaddr,
78594 + .sub = NULL,
78595 + .next = NULL,
78596 + .static_pass_number = 0,
78597 + .tv_id = TV_NONE,
78598 + .properties_required = 0,
78599 + .properties_provided = 0,
78600 + .properties_destroyed = 0,
78601 + .todo_flags_start = 0,
78602 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78603 + }
78604 +};
78605 +
78606 +static bool kernexec_cmodel_check(void)
78607 +{
78608 + tree section;
78609 +
78610 + if (ix86_cmodel != CM_KERNEL)
78611 + return false;
78612 +
78613 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78614 + if (!section || !TREE_VALUE(section))
78615 + return true;
78616 +
78617 + section = TREE_VALUE(TREE_VALUE(section));
78618 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78619 + return true;
78620 +
78621 + return false;
78622 +}
78623 +
78624 +/*
78625 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
78626 + */
78627 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
78628 +{
78629 + gimple asm_movabs_stmt;
78630 +
78631 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
78632 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
78633 + gimple_asm_set_volatile(asm_movabs_stmt, true);
78634 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
78635 + update_stmt(asm_movabs_stmt);
78636 +}
78637 +
78638 +/*
78639 + * find all asm() stmts that clobber r10 and add a reload of r10
78640 + */
78641 +static unsigned int execute_kernexec_reload(void)
78642 +{
78643 + basic_block bb;
78644 +
78645 + // 1. loop through BBs and GIMPLE statements
78646 + FOR_EACH_BB(bb) {
78647 + gimple_stmt_iterator gsi;
78648 +
78649 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78650 + // gimple match: __asm__ ("" : : : "r10");
78651 + gimple asm_stmt;
78652 + size_t nclobbers;
78653 +
78654 + // is it an asm ...
78655 + asm_stmt = gsi_stmt(gsi);
78656 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
78657 + continue;
78658 +
78659 + // ... clobbering r10
78660 + nclobbers = gimple_asm_nclobbers(asm_stmt);
78661 + while (nclobbers--) {
78662 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
78663 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
78664 + continue;
78665 + kernexec_reload_fptr_mask(&gsi);
78666 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
78667 + break;
78668 + }
78669 + }
78670 + }
78671 +
78672 + return 0;
78673 +}
78674 +
78675 +/*
78676 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78677 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78678 + */
78679 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
78680 +{
78681 + gimple assign_intptr, assign_new_fptr, call_stmt;
78682 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78683 +
78684 + call_stmt = gsi_stmt(*gsi);
78685 + old_fptr = gimple_call_fn(call_stmt);
78686 +
78687 + // create temporary unsigned long variable used for bitops and cast fptr to it
78688 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78689 + add_referenced_var(intptr);
78690 + mark_sym_for_renaming(intptr);
78691 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78692 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78693 + update_stmt(assign_intptr);
78694 +
78695 + // apply logical or to temporary unsigned long and bitmask
78696 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78697 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78698 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78699 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78700 + update_stmt(assign_intptr);
78701 +
78702 + // cast temporary unsigned long back to a temporary fptr variable
78703 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78704 + add_referenced_var(new_fptr);
78705 + mark_sym_for_renaming(new_fptr);
78706 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78707 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
78708 + update_stmt(assign_new_fptr);
78709 +
78710 + // replace call stmt fn with the new fptr
78711 + gimple_call_set_fn(call_stmt, new_fptr);
78712 + update_stmt(call_stmt);
78713 +}
78714 +
78715 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
78716 +{
78717 + gimple asm_or_stmt, call_stmt;
78718 + tree old_fptr, new_fptr, input, output;
78719 + VEC(tree, gc) *inputs = NULL;
78720 + VEC(tree, gc) *outputs = NULL;
78721 +
78722 + call_stmt = gsi_stmt(*gsi);
78723 + old_fptr = gimple_call_fn(call_stmt);
78724 +
78725 + // create temporary fptr variable
78726 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78727 + add_referenced_var(new_fptr);
78728 + mark_sym_for_renaming(new_fptr);
78729 +
78730 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78731 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78732 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78733 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78734 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78735 + VEC_safe_push(tree, gc, inputs, input);
78736 + VEC_safe_push(tree, gc, outputs, output);
78737 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78738 + gimple_asm_set_volatile(asm_or_stmt, true);
78739 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
78740 + update_stmt(asm_or_stmt);
78741 +
78742 + // replace call stmt fn with the new fptr
78743 + gimple_call_set_fn(call_stmt, new_fptr);
78744 + update_stmt(call_stmt);
78745 +}
78746 +
78747 +/*
78748 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78749 + */
78750 +static unsigned int execute_kernexec_fptr(void)
78751 +{
78752 + basic_block bb;
78753 +
78754 + // 1. loop through BBs and GIMPLE statements
78755 + FOR_EACH_BB(bb) {
78756 + gimple_stmt_iterator gsi;
78757 +
78758 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78759 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78760 + tree fn;
78761 + gimple call_stmt;
78762 +
78763 + // is it a call ...
78764 + call_stmt = gsi_stmt(gsi);
78765 + if (!is_gimple_call(call_stmt))
78766 + continue;
78767 + fn = gimple_call_fn(call_stmt);
78768 + if (TREE_CODE(fn) == ADDR_EXPR)
78769 + continue;
78770 + if (TREE_CODE(fn) != SSA_NAME)
78771 + gcc_unreachable();
78772 +
78773 + // ... through a function pointer
78774 + fn = SSA_NAME_VAR(fn);
78775 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78776 + continue;
78777 + fn = TREE_TYPE(fn);
78778 + if (TREE_CODE(fn) != POINTER_TYPE)
78779 + continue;
78780 + fn = TREE_TYPE(fn);
78781 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78782 + continue;
78783 +
78784 + kernexec_instrument_fptr(&gsi);
78785 +
78786 +//debug_tree(gimple_call_fn(call_stmt));
78787 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78788 + }
78789 + }
78790 +
78791 + return 0;
78792 +}
78793 +
78794 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78795 +static void kernexec_instrument_retaddr_bts(rtx insn)
78796 +{
78797 + rtx btsq;
78798 + rtvec argvec, constraintvec, labelvec;
78799 + int line;
78800 +
78801 + // create asm volatile("btsq $63,(%%rsp)":::)
78802 + argvec = rtvec_alloc(0);
78803 + constraintvec = rtvec_alloc(0);
78804 + labelvec = rtvec_alloc(0);
78805 + line = expand_location(RTL_LOCATION(insn)).line;
78806 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78807 + MEM_VOLATILE_P(btsq) = 1;
78808 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78809 + emit_insn_before(btsq, insn);
78810 +}
78811 +
78812 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78813 +static void kernexec_instrument_retaddr_or(rtx insn)
78814 +{
78815 + rtx orq;
78816 + rtvec argvec, constraintvec, labelvec;
78817 + int line;
78818 +
78819 + // create asm volatile("orq %%r10,(%%rsp)":::)
78820 + argvec = rtvec_alloc(0);
78821 + constraintvec = rtvec_alloc(0);
78822 + labelvec = rtvec_alloc(0);
78823 + line = expand_location(RTL_LOCATION(insn)).line;
78824 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78825 + MEM_VOLATILE_P(orq) = 1;
78826 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78827 + emit_insn_before(orq, insn);
78828 +}
78829 +
78830 +/*
78831 + * find all asm level function returns and forcibly set the highest bit of the return address
78832 + */
78833 +static unsigned int execute_kernexec_retaddr(void)
78834 +{
78835 + rtx insn;
78836 +
78837 + // 1. find function returns
78838 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78839 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78840 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78841 + rtx body;
78842 +
78843 + // is it a retn
78844 + if (!JUMP_P(insn))
78845 + continue;
78846 + body = PATTERN(insn);
78847 + if (GET_CODE(body) == PARALLEL)
78848 + body = XVECEXP(body, 0, 0);
78849 + if (GET_CODE(body) != RETURN)
78850 + continue;
78851 + kernexec_instrument_retaddr(insn);
78852 + }
78853 +
78854 +// print_simple_rtl(stderr, get_insns());
78855 +// print_rtl(stderr, get_insns());
78856 +
78857 + return 0;
78858 +}
78859 +
78860 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78861 +{
78862 + const char * const plugin_name = plugin_info->base_name;
78863 + const int argc = plugin_info->argc;
78864 + const struct plugin_argument * const argv = plugin_info->argv;
78865 + int i;
78866 + struct register_pass_info kernexec_reload_pass_info = {
78867 + .pass = &kernexec_reload_pass.pass,
78868 + .reference_pass_name = "ssa",
78869 + .ref_pass_instance_number = 0,
78870 + .pos_op = PASS_POS_INSERT_AFTER
78871 + };
78872 + struct register_pass_info kernexec_fptr_pass_info = {
78873 + .pass = &kernexec_fptr_pass.pass,
78874 + .reference_pass_name = "ssa",
78875 + .ref_pass_instance_number = 0,
78876 + .pos_op = PASS_POS_INSERT_AFTER
78877 + };
78878 + struct register_pass_info kernexec_retaddr_pass_info = {
78879 + .pass = &kernexec_retaddr_pass.pass,
78880 + .reference_pass_name = "pro_and_epilogue",
78881 + .ref_pass_instance_number = 0,
78882 + .pos_op = PASS_POS_INSERT_AFTER
78883 + };
78884 +
78885 + if (!plugin_default_version_check(version, &gcc_version)) {
78886 + error(G_("incompatible gcc/plugin versions"));
78887 + return 1;
78888 + }
78889 +
78890 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78891 +
78892 + if (TARGET_64BIT == 0)
78893 + return 0;
78894 +
78895 + for (i = 0; i < argc; ++i) {
78896 + if (!strcmp(argv[i].key, "method")) {
78897 + if (!argv[i].value) {
78898 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78899 + continue;
78900 + }
78901 + if (!strcmp(argv[i].value, "bts")) {
78902 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78903 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78904 + } else if (!strcmp(argv[i].value, "or")) {
78905 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78906 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78907 + fix_register("r10", 1, 1);
78908 + } else
78909 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78910 + continue;
78911 + }
78912 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78913 + }
78914 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78915 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78916 +
78917 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
78918 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
78919 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78920 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78921 +
78922 + return 0;
78923 +}
78924 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78925 new file mode 100644
78926 index 0000000..8b61031
78927 --- /dev/null
78928 +++ b/tools/gcc/stackleak_plugin.c
78929 @@ -0,0 +1,295 @@
78930 +/*
78931 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78932 + * Licensed under the GPL v2
78933 + *
78934 + * Note: the choice of the license means that the compilation process is
78935 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78936 + * but for the kernel it doesn't matter since it doesn't link against
78937 + * any of the gcc libraries
78938 + *
78939 + * gcc plugin to help implement various PaX features
78940 + *
78941 + * - track lowest stack pointer
78942 + *
78943 + * TODO:
78944 + * - initialize all local variables
78945 + *
78946 + * BUGS:
78947 + * - none known
78948 + */
78949 +#include "gcc-plugin.h"
78950 +#include "config.h"
78951 +#include "system.h"
78952 +#include "coretypes.h"
78953 +#include "tree.h"
78954 +#include "tree-pass.h"
78955 +#include "flags.h"
78956 +#include "intl.h"
78957 +#include "toplev.h"
78958 +#include "plugin.h"
78959 +//#include "expr.h" where are you...
78960 +#include "diagnostic.h"
78961 +#include "plugin-version.h"
78962 +#include "tm.h"
78963 +#include "function.h"
78964 +#include "basic-block.h"
78965 +#include "gimple.h"
78966 +#include "rtl.h"
78967 +#include "emit-rtl.h"
78968 +
78969 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78970 +
78971 +int plugin_is_GPL_compatible;
78972 +
78973 +static int track_frame_size = -1;
78974 +static const char track_function[] = "pax_track_stack";
78975 +static const char check_function[] = "pax_check_alloca";
78976 +static bool init_locals;
78977 +
78978 +static struct plugin_info stackleak_plugin_info = {
78979 + .version = "201111150100",
78980 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78981 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78982 +};
78983 +
78984 +static bool gate_stackleak_track_stack(void);
78985 +static unsigned int execute_stackleak_tree_instrument(void);
78986 +static unsigned int execute_stackleak_final(void);
78987 +
78988 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78989 + .pass = {
78990 + .type = GIMPLE_PASS,
78991 + .name = "stackleak_tree_instrument",
78992 + .gate = gate_stackleak_track_stack,
78993 + .execute = execute_stackleak_tree_instrument,
78994 + .sub = NULL,
78995 + .next = NULL,
78996 + .static_pass_number = 0,
78997 + .tv_id = TV_NONE,
78998 + .properties_required = PROP_gimple_leh | PROP_cfg,
78999 + .properties_provided = 0,
79000 + .properties_destroyed = 0,
79001 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
79002 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
79003 + }
79004 +};
79005 +
79006 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
79007 + .pass = {
79008 + .type = RTL_PASS,
79009 + .name = "stackleak_final",
79010 + .gate = gate_stackleak_track_stack,
79011 + .execute = execute_stackleak_final,
79012 + .sub = NULL,
79013 + .next = NULL,
79014 + .static_pass_number = 0,
79015 + .tv_id = TV_NONE,
79016 + .properties_required = 0,
79017 + .properties_provided = 0,
79018 + .properties_destroyed = 0,
79019 + .todo_flags_start = 0,
79020 + .todo_flags_finish = TODO_dump_func
79021 + }
79022 +};
79023 +
79024 +static bool gate_stackleak_track_stack(void)
79025 +{
79026 + return track_frame_size >= 0;
79027 +}
79028 +
79029 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
79030 +{
79031 + gimple check_alloca;
79032 + tree fndecl, fntype, alloca_size;
79033 +
79034 + // insert call to void pax_check_alloca(unsigned long size)
79035 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
79036 + fndecl = build_fn_decl(check_function, fntype);
79037 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
79038 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
79039 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
79040 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
79041 +}
79042 +
79043 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
79044 +{
79045 + gimple track_stack;
79046 + tree fndecl, fntype;
79047 +
79048 + // insert call to void pax_track_stack(void)
79049 + fntype = build_function_type_list(void_type_node, NULL_TREE);
79050 + fndecl = build_fn_decl(track_function, fntype);
79051 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
79052 + track_stack = gimple_build_call(fndecl, 0);
79053 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
79054 +}
79055 +
79056 +#if BUILDING_GCC_VERSION == 4005
79057 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
79058 +{
79059 + tree fndecl;
79060 +
79061 + if (!is_gimple_call(stmt))
79062 + return false;
79063 + fndecl = gimple_call_fndecl(stmt);
79064 + if (!fndecl)
79065 + return false;
79066 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
79067 + return false;
79068 +// print_node(stderr, "pax", fndecl, 4);
79069 + return DECL_FUNCTION_CODE(fndecl) == code;
79070 +}
79071 +#endif
79072 +
79073 +static bool is_alloca(gimple stmt)
79074 +{
79075 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
79076 + return true;
79077 +
79078 +#if BUILDING_GCC_VERSION >= 4007
79079 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
79080 + return true;
79081 +#endif
79082 +
79083 + return false;
79084 +}
79085 +
79086 +static unsigned int execute_stackleak_tree_instrument(void)
79087 +{
79088 + basic_block bb, entry_bb;
79089 + bool prologue_instrumented = false;
79090 +
79091 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
79092 +
79093 + // 1. loop through BBs and GIMPLE statements
79094 + FOR_EACH_BB(bb) {
79095 + gimple_stmt_iterator gsi;
79096 +
79097 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79098 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
79099 + if (!is_alloca(gsi_stmt(gsi)))
79100 + continue;
79101 +
79102 + // 2. insert stack overflow check before each __builtin_alloca call
79103 + stackleak_check_alloca(&gsi);
79104 +
79105 + // 3. insert track call after each __builtin_alloca call
79106 + stackleak_add_instrumentation(&gsi);
79107 + if (bb == entry_bb)
79108 + prologue_instrumented = true;
79109 + }
79110 + }
79111 +
79112 + // 4. insert track call at the beginning
79113 + if (!prologue_instrumented) {
79114 + gimple_stmt_iterator gsi;
79115 +
79116 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
79117 + if (dom_info_available_p(CDI_DOMINATORS))
79118 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
79119 + gsi = gsi_start_bb(bb);
79120 + stackleak_add_instrumentation(&gsi);
79121 + }
79122 +
79123 + return 0;
79124 +}
79125 +
79126 +static unsigned int execute_stackleak_final(void)
79127 +{
79128 + rtx insn;
79129 +
79130 + if (cfun->calls_alloca)
79131 + return 0;
79132 +
79133 + // keep calls only if function frame is big enough
79134 + if (get_frame_size() >= track_frame_size)
79135 + return 0;
79136 +
79137 + // 1. find pax_track_stack calls
79138 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
79139 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
79140 + rtx body;
79141 +
79142 + if (!CALL_P(insn))
79143 + continue;
79144 + body = PATTERN(insn);
79145 + if (GET_CODE(body) != CALL)
79146 + continue;
79147 + body = XEXP(body, 0);
79148 + if (GET_CODE(body) != MEM)
79149 + continue;
79150 + body = XEXP(body, 0);
79151 + if (GET_CODE(body) != SYMBOL_REF)
79152 + continue;
79153 + if (strcmp(XSTR(body, 0), track_function))
79154 + continue;
79155 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79156 + // 2. delete call
79157 + insn = delete_insn_and_edges(insn);
79158 +#if BUILDING_GCC_VERSION >= 4007
79159 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
79160 + insn = delete_insn_and_edges(insn);
79161 +#endif
79162 + }
79163 +
79164 +// print_simple_rtl(stderr, get_insns());
79165 +// print_rtl(stderr, get_insns());
79166 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79167 +
79168 + return 0;
79169 +}
79170 +
79171 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79172 +{
79173 + const char * const plugin_name = plugin_info->base_name;
79174 + const int argc = plugin_info->argc;
79175 + const struct plugin_argument * const argv = plugin_info->argv;
79176 + int i;
79177 + struct register_pass_info stackleak_tree_instrument_pass_info = {
79178 + .pass = &stackleak_tree_instrument_pass.pass,
79179 +// .reference_pass_name = "tree_profile",
79180 + .reference_pass_name = "optimized",
79181 + .ref_pass_instance_number = 0,
79182 + .pos_op = PASS_POS_INSERT_AFTER
79183 + };
79184 + struct register_pass_info stackleak_final_pass_info = {
79185 + .pass = &stackleak_final_rtl_opt_pass.pass,
79186 + .reference_pass_name = "final",
79187 + .ref_pass_instance_number = 0,
79188 + .pos_op = PASS_POS_INSERT_BEFORE
79189 + };
79190 +
79191 + if (!plugin_default_version_check(version, &gcc_version)) {
79192 + error(G_("incompatible gcc/plugin versions"));
79193 + return 1;
79194 + }
79195 +
79196 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
79197 +
79198 + for (i = 0; i < argc; ++i) {
79199 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
79200 + if (!argv[i].value) {
79201 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79202 + continue;
79203 + }
79204 + track_frame_size = atoi(argv[i].value);
79205 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
79206 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79207 + continue;
79208 + }
79209 + if (!strcmp(argv[i].key, "initialize-locals")) {
79210 + if (argv[i].value) {
79211 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79212 + continue;
79213 + }
79214 + init_locals = true;
79215 + continue;
79216 + }
79217 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79218 + }
79219 +
79220 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
79221 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
79222 +
79223 + return 0;
79224 +}
79225 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
79226 index 6789d78..4afd019 100644
79227 --- a/tools/perf/util/include/asm/alternative-asm.h
79228 +++ b/tools/perf/util/include/asm/alternative-asm.h
79229 @@ -5,4 +5,7 @@
79230
79231 #define altinstruction_entry #
79232
79233 + .macro pax_force_retaddr rip=0, reload=0
79234 + .endm
79235 +
79236 #endif
79237 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
79238 index af0f22f..9a7d479 100644
79239 --- a/usr/gen_init_cpio.c
79240 +++ b/usr/gen_init_cpio.c
79241 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
79242 int retval;
79243 int rc = -1;
79244 int namesize;
79245 - int i;
79246 + unsigned int i;
79247
79248 mode |= S_IFREG;
79249
79250 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
79251 *env_var = *expanded = '\0';
79252 strncat(env_var, start + 2, end - start - 2);
79253 strncat(expanded, new_location, start - new_location);
79254 - strncat(expanded, getenv(env_var), PATH_MAX);
79255 - strncat(expanded, end + 1, PATH_MAX);
79256 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
79257 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
79258 strncpy(new_location, expanded, PATH_MAX);
79259 + new_location[PATH_MAX] = 0;
79260 } else
79261 break;
79262 }
79263 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
79264 index d9cfb78..4f27c10 100644
79265 --- a/virt/kvm/kvm_main.c
79266 +++ b/virt/kvm/kvm_main.c
79267 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
79268
79269 static cpumask_var_t cpus_hardware_enabled;
79270 static int kvm_usage_count = 0;
79271 -static atomic_t hardware_enable_failed;
79272 +static atomic_unchecked_t hardware_enable_failed;
79273
79274 struct kmem_cache *kvm_vcpu_cache;
79275 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
79276 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
79277
79278 if (r) {
79279 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
79280 - atomic_inc(&hardware_enable_failed);
79281 + atomic_inc_unchecked(&hardware_enable_failed);
79282 printk(KERN_INFO "kvm: enabling virtualization on "
79283 "CPU%d failed\n", cpu);
79284 }
79285 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
79286
79287 kvm_usage_count++;
79288 if (kvm_usage_count == 1) {
79289 - atomic_set(&hardware_enable_failed, 0);
79290 + atomic_set_unchecked(&hardware_enable_failed, 0);
79291 on_each_cpu(hardware_enable_nolock, NULL, 1);
79292
79293 - if (atomic_read(&hardware_enable_failed)) {
79294 + if (atomic_read_unchecked(&hardware_enable_failed)) {
79295 hardware_disable_all_nolock();
79296 r = -EBUSY;
79297 }
79298 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
79299 kvm_arch_vcpu_put(vcpu);
79300 }
79301
79302 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79303 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79304 struct module *module)
79305 {
79306 int r;
79307 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79308 if (!vcpu_align)
79309 vcpu_align = __alignof__(struct kvm_vcpu);
79310 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
79311 - 0, NULL);
79312 + SLAB_USERCOPY, NULL);
79313 if (!kvm_vcpu_cache) {
79314 r = -ENOMEM;
79315 goto out_free_3;
79316 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79317 if (r)
79318 goto out_free;
79319
79320 - kvm_chardev_ops.owner = module;
79321 - kvm_vm_fops.owner = module;
79322 - kvm_vcpu_fops.owner = module;
79323 + pax_open_kernel();
79324 + *(void **)&kvm_chardev_ops.owner = module;
79325 + *(void **)&kvm_vm_fops.owner = module;
79326 + *(void **)&kvm_vcpu_fops.owner = module;
79327 + pax_close_kernel();
79328
79329 r = misc_register(&kvm_dev);
79330 if (r) {